From eab253cba0dcba580552873407ccf3514b34c905 Mon Sep 17 00:00:00 2001 From: figbot <82115609+withfig-bot@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:54:36 -0700 Subject: [PATCH] feat: update spec --- src/aws.ts | 27 +- src/aws/amplify.ts | 4 +- src/aws/appconfig.ts | 89 +- src/aws/application-signals.ts | 28 +- src/aws/autoscaling.ts | 6 +- src/aws/backup.ts | 204 ++-- src/aws/bedrock-agent-runtime.ts | 2 +- src/aws/bedrock-agent.ts | 22 +- src/aws/bedrock-runtime.ts | 4 +- src/aws/bedrock.ts | 535 ++++++++- src/aws/chatbot.ts | 327 ++++-- src/aws/chime-sdk-voice.ts | 5 +- src/aws/codebuild.ts | 4 +- src/aws/codepipeline.ts | 4 +- src/aws/cognito-idp.ts | 82 +- src/aws/connect.ts | 4 +- src/aws/datazone.ts | 741 ++++++++++++- src/aws/devicefarm.ts | 6 +- src/aws/dynamodb.ts | 4 +- src/aws/ecs.ts | 2 +- src/aws/elbv2.ts | 70 +- src/aws/finspace.ts | 2 +- src/aws/fis.ts | 67 ++ src/aws/gamelift.ts | 22 +- src/aws/glue.ts | 44 + src/aws/guardduty.ts | 82 +- src/aws/internetmonitor.ts | 2 +- src/aws/iot.ts | 236 ++++ src/aws/iotsitewise.ts | 248 ++++- src/aws/ivs-realtime.ts | 296 ++++- src/aws/ivs.ts | 16 +- src/aws/ivschat.ts | 10 +- src/aws/kafka.ts | 2 +- src/aws/lambda.ts | 221 +++- src/aws/logs.ts | 196 +++- src/aws/mediaconnect.ts | 44 + src/aws/mediaconvert.ts | 72 ++ src/aws/medialive.ts | 1783 ++++++++++++++++++++++++++---- src/aws/omics.ts | 20 +- src/aws/organizations.ts | 8 +- src/aws/pca-connector-scep.ts | 2 +- src/aws/pcs.ts | 1033 +++++++++++++++++ src/aws/personalize.ts | 57 +- src/aws/pipes.ts | 16 + src/aws/qapps.ts | 52 +- src/aws/qbusiness.ts | 53 +- src/aws/rds.ts | 23 +- src/aws/redshift-data.ts | 30 + src/aws/runtime.sagemaker.ts | 8 + src/aws/s3control.ts | 130 ++- src/aws/sagemaker-runtime.ts | 8 + src/aws/sagemaker.ts | 26 +- src/aws/securityhub.ts | 12 +- src/aws/ssm.ts | 46 +- src/aws/stepfunctions.ts | 16 + src/aws/storagegateway.ts | 74 +- src/aws/supplychain.ts | 5 +- src/aws/synthetics.ts | 14 +- src/aws/timestream-influxdb.ts | 16 + src/aws/wafv2.ts | 2 +- src/aws/workspaces.ts | 26 +- 61 files changed, 6496 insertions(+), 694 deletions(-) create mode 100644 src/aws/pcs.ts diff --git a/src/aws.ts b/src/aws.ts index d3883a0dcc28..a5197598b375 100644 --- a/src/aws.ts +++ b/src/aws.ts @@ -290,7 +290,8 @@ const completionSpec: Fig.Spec = { }, { name: "chatbot", - description: "AWS Chatbot API", + description: + "The AWS Chatbot API Reference provides descriptions, API request parameters, and the XML response for each of the AWS Chatbot API actions. AWS Chatbot APIs are currently available in the following Regions: US East (Ohio) - us-east-2 US West (Oregon) - us-west-2 Asia Pacific (Singapore) - ap-southeast-1 Europe (Ireland) - eu-west-1 The AWS Chatbot console can only be used in US East (Ohio). Your configuration data however, is stored in each of the relevant available Regions. Your AWS CloudTrail events are logged in whatever Region you call from, not US East (N. Virginia) by default", loadSpec: "aws/chatbot", }, { @@ -473,12 +474,6 @@ const completionSpec: Fig.Spec = { "CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. 
Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. 
PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success", loadSpec: "aws/codepipeline", }, - { - name: "codestar", - description: - "AWS CodeStar This is the API reference for AWS CodeStar. This reference provides descriptions of the operations and data types for the AWS CodeStar API along with usage examples. You can use the AWS CodeStar API to work with: Projects and their resources, by calling the following: DeleteProject, which deletes a project. DescribeProject, which lists the attributes of a project. ListProjects, which lists all projects associated with your AWS account. ListResources, which lists the resources associated with a project. ListTagsForProject, which lists the tags associated with a project. TagProject, which adds tags to a project. UntagProject, which removes tags from a project. UpdateProject, which updates the attributes of a project. Teams and team members, by calling the following: AssociateTeamMember, which adds an IAM user to the team for a project. DisassociateTeamMember, which removes an IAM user from the team for a project. ListTeamMembers, which lists all the IAM users in the team for a project, including their roles and attributes. UpdateTeamMember, which updates a team member's attributes in a project. Users, by calling the following: CreateUserProfile, which creates a user profile that contains data associated with the user across all projects. DeleteUserProfile, which deletes all user profile information across all projects. DescribeUserProfile, which describes the profile of a user. ListUserProfiles, which lists all user profiles. UpdateUserProfile, which updates the profile for a user", - loadSpec: "aws/codestar", - }, { name: "codestar-connections", description: @@ -1107,19 +1102,19 @@ const completionSpec: Fig.Spec = { { name: "ivs", description: - 'Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel \u2014 Stores configuration data related to your live stream. You first create a channel and then use the channel\u2019s stream key to start your live stream. 
Stream key \u2014 An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair \u2014 Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration \u2014 Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy \u2014 Restricts playback by countries and/or origin sites. For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. 
For more information, see Amazon Resource Names in the AWS General Reference', + 'Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel \u2014 Stores configuration data related to your live stream. You first create a channel and then use the channel\u2019s stream key to start your live stream. Stream key \u2014 An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair \u2014 Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration \u2014 Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy \u2014 Restricts playback by countries and/or origin sites. For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. 
However, if your application calls the Amazon IVS API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', loadSpec: "aws/ivs", }, { name: "ivs-realtime", description: - 'The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Key Concepts Stage \u2014 A virtual space where participants can exchange video in real time. Participant token \u2014 A token that authenticates a participant when they join a stage. Participant object \u2014 Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. For server-side composition: Composition process \u2014 Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process. Composition \u2014 Controls the look of the outputs, including how participants are positioned in the video. For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource', + 'The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Key Concepts Stage \u2014 A virtual space where participants can exchange video in real time. Participant token \u2014 A token that authenticates a participant when they join a stage. 
Participant object \u2014 Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. For server-side composition: Composition process \u2014 Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process. Composition \u2014 Controls the look of the outputs, including how participants are positioned in the video. For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource', loadSpec: "aws/ivs-realtime", }, { name: "ivschat", description: - 'Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Resources The following resources are part of Amazon IVS Chat: LoggingConfiguration \u2014 A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information. Room \u2014 The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. 
For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user\u2019s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', + 'Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Resources The following resources are part of Amazon IVS Chat: LoggingConfiguration \u2014 A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information. Room \u2014 The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information. 
Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken operation through the AWS SDK. You call CreateChatToken for every user\u2019s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', loadSpec: "aws/ivschat", }, { @@ -1602,9 +1597,15 @@ const completionSpec: Fig.Spec = { { name: "pca-connector-scep", description: - "Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change. Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide", + "Connector for SCEP creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. 
For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide", loadSpec: "aws/pca-connector-scep", }, + { + name: "pcs", + description: + "Amazon Web Services Parallel Computing Service (Amazon Web Services PCS) is a managed service that makes it easier for you to run and scale your high performance computing (HPC) workloads, and build scientific and engineering models on Amazon Web Services using Slurm. For more information, see the Amazon Web Services Parallel Computing Service User Guide. This reference describes the actions and data types of the service management API. You can use the Amazon Web Services SDKs to call the API actions in software, or use the Command Line Interface (CLI) to call the API actions manually. These API actions manage the service through an Amazon Web Services account. The API actions operate on Amazon Web Services PCS resources. A resource is an entity in Amazon Web Services that you can work with. Amazon Web Services services create resources when you use the features of the service. Examples of Amazon Web Services PCS resources include clusters, compute node groups, and queues. For more information about resources in Amazon Web Services, see Resource in the Resource Explorer User Guide. An Amazon Web Services PCS compute node is an Amazon EC2 instance. You don't launch compute nodes directly. Amazon Web Services PCS uses configuration information that you provide to launch compute nodes in your Amazon Web Services account. You receive billing charges for your running compute nodes. Amazon Web Services PCS automatically terminates your compute nodes when you delete the Amazon Web Services PCS resources related to those compute nodes", + loadSpec: "aws/pcs", + }, { name: "personalize", description: @@ -1683,7 +1684,7 @@ const completionSpec: Fig.Spec = { { name: "qapps", description: - "The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create an Q Appthat exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for marketing content-generation like writing customer emails and creating promotional content using a certain style of voice, tone, and branding. For more information, see Amazon Q App in the Amazon Q Business User Guide", + "The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create a Q App that exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for writing customer emails and creating promotional content using a certain style of voice, tone, and branding. For more information on the capabilities, see Amazon Q Apps capabilities in the Amazon Q Business User Guide. For an overview of the Amazon Q App APIs, see Overview of Amazon Q Apps API operations. 
For information about the IAM access control permissions you need to use the Amazon Q Apps API, see IAM role for the Amazon Q Business web experience including Amazon Q Apps in the Amazon Q Business User Guide", loadSpec: "aws/qapps", }, { @@ -1936,7 +1937,7 @@ const completionSpec: Fig.Spec = { { name: "securityhub", description: - "Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Services and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Services. In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Services . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. 
With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of central configuration operations, see the Central configuration terms and concepts section of the Security Hub User Guide. The following throttling limits apply to Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second", + "Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services services, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services services, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Services services and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide.
The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Services services. In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Services services. They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of central configuration operations, see the Central configuration terms and concepts section of the Security Hub User Guide. The following throttling limits apply to Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second", loadSpec: "aws/securityhub", }, { diff --git a/src/aws/amplify.ts b/src/aws/amplify.ts index 0fa0ffecca97..f870551bfd5f 100644 --- a/src/aws/amplify.ts +++ b/src/aws/amplify.ts @@ -359,7 +359,7 @@ const completionSpec: Fig.Spec = { { name: "--platform", description: - "The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC", + "The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC. If you are deploying an SSG only app with Next.js version 14 or later, you must set the platform type to WEB_COMPUTE and set the artifacts baseDirectory to .next in the application's build settings.
For an example of the build specification settings, see Amplify build settings for a Next.js 14 SSG application in the Amplify Hosting User Guide", args: { name: "string", suggestions: ["WEB"], @@ -2282,7 +2282,7 @@ const completionSpec: Fig.Spec = { { name: "--platform", description: - "The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC", + "The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC. If you are deploying an SSG only app with Next.js version 14 or later, you must set the platform type to WEB_COMPUTE", args: { name: "string", suggestions: ["WEB"], diff --git a/src/aws/appconfig.ts b/src/aws/appconfig.ts index 293fd5205d0b..0f06aff409c2 100644 --- a/src/aws/appconfig.ts +++ b/src/aws/appconfig.ts @@ -424,7 +424,7 @@ const completionSpec: Fig.Spec = { { name: "create-hosted-configuration-version", description: - "Creates a new configuration in the AppConfig hosted configuration store", + "Creates a new configuration in the AppConfig hosted configuration store. If you're creating a feature flag, we recommend you familiarize yourself with the JSON schema for feature flag data. For more information, see Type reference for AWS.AppConfig.FeatureFlags in the AppConfig User Guide", options: [ { name: "--application-id", @@ -450,7 +450,7 @@ const completionSpec: Fig.Spec = { { name: "--content", description: - "The content of the configuration or the configuration data", + "The configuration data, as bytes. AppConfig accepts any type of data, including text formats like JSON or TOML, or binary formats like protocol buffers or compressed data", args: { name: "blob", }, @@ -490,8 +490,7 @@ const completionSpec: Fig.Spec = { }, { name: "delete-application", - description: - "Deletes an application. Deleting an application does not delete a configuration from a host", + description: "Deletes an application", options: [ { name: "--application-id", @@ -522,7 +521,7 @@ const completionSpec: Fig.Spec = { { name: "delete-configuration-profile", description: - "Deletes a configuration profile. Deleting a configuration profile does not delete a configuration from a host", + "Deletes a configuration profile. To prevent users from unintentionally deleting actively-used configuration profiles, enable deletion protection", options: [ { name: "--application-id", @@ -539,6 +538,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--deletion-protection-check", + description: + "A parameter to configure deletion protection. If enabled, deletion protection prevents a user from deleting a configuration profile if your application has called either GetLatestConfiguration or for the configuration profile during the specified interval. This parameter supports the following values: BYPASS: Instructs AppConfig to bypass the deletion protection check and delete a configuration profile even if deletion protection would have otherwise prevented it. APPLY: Instructs the deletion protection check to run, even if deletion protection is disabled at the account level. 
APPLY also forces the deletion protection check to run against resources created in the past hour, which are normally excluded from deletion protection checks. ACCOUNT_DEFAULT: The default setting, which instructs AppConfig to implement the deletion protection value specified in the UpdateAccountSettings API", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -560,8 +567,7 @@ const completionSpec: Fig.Spec = { }, { name: "delete-deployment-strategy", - description: - "Deletes a deployment strategy. Deleting a deployment strategy does not delete a configuration from a host", + description: "Deletes a deployment strategy", options: [ { name: "--deployment-strategy-id", @@ -592,8 +598,15 @@ const completionSpec: Fig.Spec = { { name: "delete-environment", description: - "Deletes an environment. Deleting an environment does not delete a configuration from a host", + "Deletes an environment. To prevent users from unintentionally deleting actively-used environments, enable deletion protection", options: [ + { + name: "--environment-id", + description: "The ID of the environment that you want to delete", + args: { + name: "string", + }, + }, { name: "--application-id", description: @@ -603,8 +616,9 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--environment-id", - description: "The ID of the environment that you want to delete", + name: "--deletion-protection-check", + description: + "A parameter to configure deletion protection. If enabled, deletion protection prevents a user from deleting an environment if your application called either GetLatestConfiguration or in the environment during the specified interval. This parameter supports the following values: BYPASS: Instructs AppConfig to bypass the deletion protection check and delete a configuration profile even if deletion protection would have otherwise prevented it. APPLY: Instructs the deletion protection check to run, even if deletion protection is disabled at the account level. APPLY also forces the deletion protection check to run against resources created in the past hour, which are normally excluded from deletion protection checks. ACCOUNT_DEFAULT: The default setting, which instructs AppConfig to implement the deletion protection value specified in the UpdateAccountSettings API", args: { name: "string", }, @@ -744,6 +758,30 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-account-settings", + description: + "Returns information about the status of the DeletionProtection parameter", + options: [ + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-application", description: "Retrieves information about an application", @@ -1927,6 +1965,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-account-settings", + description: "Updates the value of the DeletionProtection parameter", + options: [ + { + name: "--deletion-protection", + description: + "A parameter to configure deletion protection. If enabled, deletion protection prevents a user from deleting a configuration profile or an environment if AppConfig has called either GetLatestConfiguration or for the configuration profile or from the environment during the specified interval. Deletion protection is disabled by default. The default interval for ProtectionPeriodInMinutes is 60", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-application", description: "Updates an application", diff --git a/src/aws/application-signals.ts b/src/aws/application-signals.ts index 16cfd7465e23..e808578f965a 100644 --- a/src/aws/application-signals.ts +++ b/src/aws/application-signals.ts @@ -6,7 +6,7 @@ const completionSpec: Fig.Spec = { { name: "batch-get-service-level-objective-budget-report", description: - "Use this operation to retrieve one or more service level objective (SLO) budget reports. An error budget is the amount of time in unhealthy periods that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month. Budget reports include a health indicator, the attainment value, and remaining budget. For more information about SLO error budgets, see SLO concepts", + "Use this operation to retrieve one or more service level objective (SLO) budget reports. An error budget is the amount of time or requests in an unhealthy state that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month. Budget reports include a health indicator, the attainment value, and remaining budget. 
For more information about SLO error budgets, see SLO concepts",
      options: [
        {
          name: "--timestamp",
@@ -46,7 +46,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "create-service-level-objective",
       description:
-        "Creates a service level objective (SLO), which can help you ensure that your critical business operations are meeting customer expectations. Use SLOs to set and track specific target levels for the reliability and availability of your applications and services. SLOs use service level indicators (SLIs) to calculate whether the application is performing at the level that you want. Create an SLO to set a target for a service or operation\u2019s availability or latency. CloudWatch measures this target frequently you can find whether it has been breached. When you create an SLO, you set an attainment goal for it. An attainment goal is the ratio of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state. After you have created an SLO, you can retrieve error budget reports for it. An error budget is the number of periods or amount of time that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. for example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms every month translates to an error budget of 21.9 minutes of downtime per month. When you call this operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already exist in your account. This service- linked role has the following permissions: xray:GetServiceGraph logs:StartQuery logs:GetQueryResults cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources autoscaling:DescribeAutoScalingGroups You can easily set SLO targets for your applications that are discovered by Application Signals, using critical metrics such as latency and availability. You can also set SLOs against any CloudWatch metric or math expression that produces a time series. For more information about SLOs, see Service level objectives (SLOs)",
+        "Creates a service level objective (SLO), which can help you ensure that your critical business operations are meeting customer expectations. Use SLOs to set and track specific target levels for the reliability and availability of your applications and services. SLOs use service level indicators (SLIs) to calculate whether the application is performing at the level that you want. Create an SLO to set a target for a service or operation\u2019s availability or latency. CloudWatch measures this target frequently so you can find whether it has been breached. The target performance quality that is defined for an SLO is the attainment goal. You can set SLO targets for your applications that are discovered by Application Signals, using critical metrics such as latency and availability. You can also set SLOs against any CloudWatch metric or math expression that produces a time series. When you create an SLO, you specify whether it is a period-based SLO or a request-based SLO. Each type of SLO has a different way of evaluating your application's performance against its attainment goal. A period-based SLO uses defined periods of time within a specified total time interval. For each period of time, Application Signals determines whether the application met its goal. 
The attainment rate is calculated as the number of good periods/number of total periods. For example, for a period-based SLO, meeting an attainment goal of 99.9% means that within your interval, your application must meet its performance goal during at least 99.9% of the time periods. A request-based SLO doesn't use pre-defined periods of time. Instead, the SLO measures number of good requests/number of total requests during the interval. At any time, you can find the ratio of good requests to total requests for the interval up to the time stamp that you specify, and measure that ratio against the goal set in your SLO. After you have created an SLO, you can retrieve error budget reports for it. An error budget is the amount of time or amount of requests that your application can be non-compliant with the SLO's goal, and still have your application meet the goal. For a period-based SLO, the error budget starts at a number defined by the highest number of periods that can fail to meet the threshold, while still meeting the overall goal. The remaining error budget decreases with every failed period that is recorded. The error budget within one interval can never increase. For example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms every month translates to an error budget of 21.9 minutes of downtime per month. For a request-based SLO, the remaining error budget is dynamic and can increase or decrease, depending on the ratio of good requests to total requests. For more information about SLOs, see Service level objectives (SLOs). When you perform a CreateServiceLevelObjective operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already exist in your account. This service-linked role has the following permissions: xray:GetServiceGraph logs:StartQuery logs:GetQueryResults cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources autoscaling:DescribeAutoScalingGroups",
       options: [
         {
           name: "--name",
@@ -65,7 +65,15 @@ const completionSpec: Fig.Spec = {
         {
           name: "--sli-config",
           description:
-            "A structure that contains information about what service and what performance metric that this SLO will monitor",
+            "If this SLO is a period-based SLO, this structure defines the information about what performance metric this SLO will monitor. You can't specify both RequestBasedSliConfig and SliConfig in the same operation",
           args: {
             name: "structure",
           },
         },
+        {
+          name: "--request-based-sli-config",
+          description:
+            "If this SLO is a request-based SLO, this structure defines the information about what performance metric this SLO will monitor. You can't specify both RequestBasedSliConfig and SliConfig in the same operation",
+          args: {
+            name: "structure",
+          },
+        },
         {
           name: "--goal",
           description:
-            "A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold",
+            "This structure contains the attributes that determine the goal of the SLO",
           args: {
             name: "structure",
           },
@@ -775,7 +783,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "update-service-level-objective",
       description:
-        "Updates an existing service level objective (SLO). If you omit parameters, the previous values of those parameters are retained",
+        "Updates an existing service level objective (SLO). If you omit parameters, the previous values of those parameters are retained. 
You cannot change from a period-based SLO to a request-based SLO, or change from a request-based SLO to a period-based SLO", options: [ { name: "--id", @@ -795,7 +803,15 @@ const completionSpec: Fig.Spec = { { name: "--sli-config", description: - "A structure that contains information about what performance metric this SLO will monitor", + "If this SLO is a period-based SLO, this structure defines the information about what performance metric this SLO will monitor", + args: { + name: "structure", + }, + }, + { + name: "--request-based-sli-config", + description: + "If this SLO is a request-based SLO, this structure defines the information about what performance metric this SLO will monitor. You can't specify both SliConfig and RequestBasedSliConfig in the same operation", args: { name: "structure", }, diff --git a/src/aws/autoscaling.ts b/src/aws/autoscaling.ts index fffa22422b69..5e0b6f49b583 100644 --- a/src/aws/autoscaling.ts +++ b/src/aws/autoscaling.ts @@ -432,7 +432,7 @@ const completionSpec: Fig.Spec = { { name: "--health-check-type", description: - "A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set", + "A comma-separated value string of one or more health check types. The valid values are EC2, EBS, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set", args: { name: "string", }, @@ -3311,7 +3311,7 @@ const completionSpec: Fig.Spec = { { name: "set-instance-health", description: - "Sets the health status of the specified instance. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide", + "Sets the health status of the specified instance. For more information, see Set up a custom health check for your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide", options: [ { name: "--instance-id", @@ -3618,7 +3618,7 @@ const completionSpec: Fig.Spec = { { name: "--health-check-type", description: - "A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set", + "A comma-separated value string of one or more health check types. The valid values are EC2, EBS, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set", args: { name: "string", }, diff --git a/src/aws/backup.ts b/src/aws/backup.ts index 7d51939e5365..4737714dba0b 100644 --- a/src/aws/backup.ts +++ b/src/aws/backup.ts @@ -6,12 +6,11 @@ const completionSpec: Fig.Spec = { { name: "cancel-legal-hold", description: - "This action removes the specified legal hold on a recovery point. 
This action can only be performed by a user with sufficient permissions",
+        "Removes the specified legal hold on a recovery point. This action can only be performed by a user with sufficient permissions",
       options: [
         {
           name: "--legal-hold-id",
-          description:
-            "Legal hold ID required to remove the specified legal hold on a recovery point",
+          description: "The ID of the legal hold",
           args: {
             name: "string",
           },
@@ -19,7 +18,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--cancel-description",
           description:
-            "String describing the reason for removing the legal hold",
+            "A string that describes the reason for removing the legal hold",
           args: {
             name: "string",
           },
@@ -27,7 +26,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--retain-record-in-days",
           description:
-            "The integer amount in days specifying amount of days after this API operation to remove legal hold",
+            "The integer amount, in days, after which to remove the legal hold",
           args: {
             name: "long",
           },
@@ -59,15 +58,14 @@ const completionSpec: Fig.Spec = {
         {
           name: "--backup-plan",
           description:
-            "Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules",
+            "The body of a backup plan. Includes a BackupPlanName and one or more sets of Rules",
           args: {
             name: "structure",
           },
         },
         {
           name: "--backup-plan-tags",
-          description:
-            "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan",
+          description: "The tags to assign to the backup plan",
           args: {
             name: "map",
           },
@@ -106,8 +104,7 @@ const completionSpec: Fig.Spec = {
     options: [
       {
         name: "--backup-plan-id",
-        description:
-          "Uniquely identifies the backup plan to be associated with the selection of resources",
+        description: "The ID of the backup plan",
        args: {
          name: "string",
        },
@@ -115,7 +112,7 @@ const completionSpec: Fig.Spec = {
       {
         name: "--backup-selection",
         description:
-          "Specifies the body of a request to assign a set of resources to a backup plan",
+          "The body of a request to assign a set of resources to a backup plan",
         args: {
           name: "structure",
         },
@@ -162,8 +159,7 @@ const completionSpec: Fig.Spec = {
       },
       {
         name: "--backup-vault-tags",
-        description:
-          "Metadata that you can assign to help organize the resources that you create. Each tag is a key-value pair",
+        description: "The tags to assign to the backup vault",
         args: {
           name: "map",
         },
@@ -227,7 +223,7 @@ const completionSpec: Fig.Spec = {
       {
         name: "--framework-controls",
         description:
-          "A list of the controls that make up the framework. Each control in the list has a name, input parameters, and scope",
+          "The controls that make up the framework. Each control in the list has a name, input parameters, and scope",
         args: {
           name: "list",
         },
@@ -242,8 +238,7 @@ const completionSpec: Fig.Spec = {
       },
       {
         name: "--framework-tags",
-        description:
-          "Metadata that you can assign to help organize the frameworks that you create. Each tag is a key-value pair",
+        description: "The tags to assign to the framework",
         args: {
           name: "map",
         },
@@ -270,18 +265,18 @@ const completionSpec: Fig.Spec = {
     {
       name: "create-legal-hold",
       description:
-        "This action creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering or deleting a backup until an authorized user cancels the legal hold. 
Any actions to delete or disassociate a recovery point will fail with an error if one or more active legal holds are on the recovery point", + "Creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering or deleting a backup until an authorized user cancels the legal hold. Any actions to delete or disassociate a recovery point will fail with an error if one or more active legal holds are on the recovery point", options: [ { name: "--title", - description: "This is the string title of the legal hold", + description: "The title of the legal hold", args: { name: "string", }, }, { name: "--description", - description: "This is the string description of the legal hold", + description: "The description of the legal hold", args: { name: "string", }, @@ -297,7 +292,7 @@ const completionSpec: Fig.Spec = { { name: "--recovery-point-selection", description: - "This specifies criteria to assign a set of resources, such as resource types or backup vaults", + "The criteria to assign a set of resources, such as resource types or backup vaults", args: { name: "structure", }, @@ -332,19 +327,19 @@ const completionSpec: Fig.Spec = { { name: "create-logically-air-gapped-backup-vault", description: - "This request creates a logical container to where backups may be copied. This request includes a name, the Region, the maximum number of retention days, the minimum number of retention days, and optionally can include tags and a creator request ID. Do not include sensitive data, such as passport numbers, in the name of a backup vault", + "Creates a logical container to where backups may be copied. This request includes a name, the Region, the maximum number of retention days, the minimum number of retention days, and optionally can include tags and a creator request ID. Do not include sensitive data, such as passport numbers, in the name of a backup vault", options: [ { name: "--backup-vault-name", - description: "This is the name of the vault that is being created", + description: + "The name of a logical container where backups are stored. Logically air-gapped backup vaults are identified by names that are unique to the account used to create them and the Region where they are created", args: { name: "string", }, }, { name: "--backup-vault-tags", - description: - "These are the tags that will be included in the newly-created vault", + description: "The tags to assign to the vault", args: { name: "map", }, @@ -352,7 +347,7 @@ const completionSpec: Fig.Spec = { { name: "--creator-request-id", description: - "This is the ID of the creation request. This parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters", + "The ID of the creation request. This parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters", args: { name: "string", }, @@ -360,7 +355,7 @@ const completionSpec: Fig.Spec = { { name: "--min-retention-days", description: - "This setting specifies the minimum retention period that the vault retains its recovery points. If this parameter is not specified, no minimum retention period is enforced. If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or longer than the minimum retention period. 
If a job retention period is shorter than that minimum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault", + "This setting specifies the minimum retention period that the vault retains its recovery points. The minimum value accepted is 7 days", args: { name: "long", }, @@ -368,7 +363,7 @@ const completionSpec: Fig.Spec = { { name: "--max-retention-days", description: - "This is the setting that specifies the maximum retention period that the vault retains its recovery points. If this parameter is not specified, Backup does not enforce a maximum retention period on the recovery points in the vault (allowing indefinite storage). If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or shorter than the maximum retention period. If the job retention period is longer than that maximum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault", + "The maximum retention period that the vault retains its recovery points", args: { name: "long", }, @@ -431,8 +426,7 @@ const completionSpec: Fig.Spec = { }, { name: "--report-plan-tags", - description: - "Metadata that you can assign to help organize the report plans that you create. Each tag is a key-value pair", + description: "The tags to assign to the report plan", args: { name: "map", }, @@ -467,7 +461,7 @@ const completionSpec: Fig.Spec = { { name: "create-restore-testing-plan", description: - "This is the first of two steps to create a restore testing plan; once this request is successful, finish the procedure with request CreateRestoreTestingSelection. You must include the parameter RestoreTestingPlan. You may optionally include CreatorRequestId and Tags", + "Creates a restore testing plan. The first of two steps to create a restore testing plan. After this request is successful, finish the procedure using CreateRestoreTestingSelection", options: [ { name: "--creator-request-id", @@ -487,8 +481,7 @@ const completionSpec: Fig.Spec = { }, { name: "--tags", - description: - "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters,numbers, spaces, and the following characters: + - = . _ : /", + description: "The tags to assign to the restore testing plan", args: { name: "map", }, @@ -638,7 +631,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -733,7 +726,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created", args: { name: "string", }, @@ -795,7 +788,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -968,14 +961,14 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, }, { name: "--backup-vault-account-id", - description: "This is the account ID of the specified backup vault", + description: "The account ID of the specified backup vault", args: { name: "string", }, @@ -1125,7 +1118,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1140,7 +1133,7 @@ const completionSpec: Fig.Spec = { }, { name: "--backup-vault-account-id", - description: "This is the account ID of the specified backup vault", + description: "The account ID of the specified backup vault", args: { name: "string", }, @@ -1330,7 +1323,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "This is the name of a logical container where the child (nested) recovery point is stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where the child (nested) recovery point is stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1338,7 +1331,7 @@ const completionSpec: Fig.Spec = { { name: "--recovery-point-arn", description: - "This is the Amazon Resource Name (ARN) that uniquely identifies the child (nested) recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45", + "The Amazon Resource Name (ARN) that uniquely identifies the child (nested) recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45", args: { name: "string", }, @@ -1542,7 +1535,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1573,7 +1566,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1604,8 +1597,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--legal-hold-id", - description: - "This is the ID required to use GetLegalHold. This unique ID is associated with a specific legal hold", + description: "The ID of the legal hold", args: { name: "string", }, @@ -1637,7 +1629,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1652,7 +1644,7 @@ const completionSpec: Fig.Spec = { }, { name: "--backup-vault-account-id", - description: "This is the account ID of the specified backup vault", + description: "The account ID of the specified backup vault", args: { name: "string", }, @@ -1715,7 +1707,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--backup-vault-account-id", - description: "This is the account ID of the specified backup vault", + description: "The account ID of the specified backup vault", args: { name: "string", }, @@ -1888,7 +1880,7 @@ const completionSpec: Fig.Spec = { { name: "--aggregation-period", description: - "This is the period that sets the boundaries for returned results. 
Acceptable values include ONE_DAY for daily job count for the prior 14 days. SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for aggregated job count for prior 14 days", + "The period for the returned results. ONE_DAY - The daily job count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. FOURTEEN_DAYS - The aggregated job count for prior 14 days", args: { name: "string", }, @@ -1896,7 +1888,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "This parameter sets the maximum number of items to be returned. The value is an integer. Range of accepted values is from 1 to 500", + "The maximum number of items to be returned. The value is an integer. Range of accepted values is from 1 to 500", args: { name: "integer", }, @@ -1967,7 +1959,7 @@ const completionSpec: Fig.Spec = { { name: "--by-backup-vault-name", description: - "Returns only backup jobs that will be stored in the specified backup vault. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "Returns only backup jobs that will be stored in the specified backup vault. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -1991,7 +1983,7 @@ const completionSpec: Fig.Spec = { { name: "--by-resource-type", description: - "Returns only backup jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for virtual machines", + "Returns only backup jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine for VMware virtual machines", args: { name: "string", }, @@ -2081,8 +2073,7 @@ const completionSpec: Fig.Spec = { }, { name: "list-backup-plan-templates", - description: - "Returns metadata of your saved backup plan templates, including the template ID, name, and the creation and deletion dates", + description: "Lists the backup plan templates", options: [ { name: "--next-token", @@ -2094,7 +2085,7 @@ const completionSpec: Fig.Spec = { }, { name: "--max-results", - description: "The maximum number of items to be returned", + description: "The maximum number of items to return", args: { name: "integer", }, @@ -2214,8 +2205,7 @@ const completionSpec: Fig.Spec = { }, { name: "list-backup-plans", - description: - "Returns a list of all 
active backup plans for an authenticated account. The list contains information such as Amazon Resource Names (ARNs), plan IDs, creation and deletion dates, version IDs, plan names, and creator request IDs", + description: "Lists the active backup plans for the account", options: [ { name: "--next-token", @@ -2476,7 +2466,7 @@ const completionSpec: Fig.Spec = { { name: "--aggregation-period", description: - "This is the period that sets the boundaries for returned results. ONE_DAY for daily job count for the prior 14 days. SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for aggregated job count for prior 14 days", + "The period for the returned results. ONE_DAY - The daily job count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. FOURTEEN_DAYS - The aggregated job count for prior 14 days", args: { name: "string", }, @@ -2569,7 +2559,7 @@ const completionSpec: Fig.Spec = { { name: "--by-resource-type", description: - "Returns only backup jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for virtual machines", + "Returns only backup jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine for VMware virtual machines", args: { name: "string", }, @@ -2577,7 +2567,7 @@ const completionSpec: Fig.Spec = { { name: "--by-destination-vault-arn", description: - "An Amazon Resource Name (ARN) that uniquely identifies a source backup vault to copy from; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault", + "An Amazon Resource Name (ARN) that uniquely identifies a source backup vault to copy from; for example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault", args: { name: "string", }, @@ -2840,7 +2830,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "This is the list of protected resources by backup vault within the vault(s) you specify by name", + "The list of protected resources by backup vault within the vault(s) you specify by name", args: { name: "string", }, @@ -2848,7 +2838,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-account-id", description: - "This is the list of protected resources by backup vault within the vault(s) you specify by account ID", + "The list of protected resources by backup vault within the vault(s) you specify by account ID", args: { name: "string", }, @@ -2919,7 +2909,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of 
a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens. Backup vault name might not be available when a supported service creates the backup", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. Backup vault name might not be available when a supported service creates the backup", args: { name: "string", }, @@ -2958,7 +2948,7 @@ const completionSpec: Fig.Spec = { { name: "--by-resource-type", description: - "Returns only recovery points that match the specified resource type(s): Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for virtual machines", + "Returns only recovery points that match the specified resource type(s): Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine for VMware virtual machines", args: { name: "string", }, @@ -3045,7 +3035,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--legal-hold-id", - description: "This is the ID of the legal hold", + description: "The ID of the legal hold", args: { name: "string", }, @@ -3053,7 +3043,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "This is the next item following a partial list of returned resources. For example, if a request is made to return MaxResults number of resources, NextToken allows you to return more items in your list starting at the location pointed to by the next token", + "The next item following a partial list of returned resources. For example, if a request is made to return MaxResults number of resources, NextToken allows you to return more items in your list starting at the location pointed to by the next token", args: { name: "string", }, @@ -3061,7 +3051,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "This is the maximum number of resource list items to be returned", + "The maximum number of resource list items to be returned", args: { name: "integer", }, @@ -3112,7 +3102,7 @@ const completionSpec: Fig.Spec = { { name: "list-recovery-points-by-resource", description: - "Returns detailed information about all the recovery points of the type specified by a resource Amazon Resource Name (ARN). 
For Amazon EFS and Amazon EC2, this action only lists recovery points created by Backup", + "The information about the recovery points of the type specified by a resource Amazon Resource Name (ARN). For Amazon EFS and Amazon EC2, this action only lists recovery points created by Backup", options: [ { name: "--resource-arn", @@ -3334,7 +3324,7 @@ const completionSpec: Fig.Spec = { { name: "--aggregation-period", description: - "This is the period that sets the boundaries for returned results. Acceptable values include ONE_DAY for daily job count for the prior 14 days. SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for aggregated job count for prior 14 days", + "The period for the returned results. ONE_DAY - The daily job count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. FOURTEEN_DAYS - The aggregated job count for prior 14 days", args: { name: "string", }, @@ -3405,7 +3395,7 @@ const completionSpec: Fig.Spec = { { name: "--by-resource-type", description: - "Include this parameter to return only restore jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for virtual machines", + "Include this parameter to return only restore jobs for the specified resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine for VMware virtual machines", args: { name: "string", }, @@ -3732,7 +3722,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags", description: - 'Returns a list of key-value pairs assigned to a target recovery point, backup plan, or backup vault. ListTags only works for resource types that support full Backup management of their backups. Those resource types are listed in the "Full Backup management" section of the Feature availability by resource table', + "Returns the tags assigned to the resource, such as a target recovery point, backup plan, or backup vault", options: [ { name: "--resource-arn", @@ -3784,7 +3774,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -3818,7 +3808,7 @@ const completionSpec: Fig.Spec = { { name: "put-backup-vault-lock-configuration", description: - "Applies Backup Vault Lock to a backup vault, preventing attempts to delete any recovery point stored in or created in a backup vault. Vault Lock also prevents attempts to update the lifecycle policy that controls the retention period of any recovery point currently stored in a backup vault. If specified, Vault Lock enforces a minimum and maximum retention period for future backup and copy jobs that target a backup vault. Backup Vault Lock has been assessed by Cohasset Associates for use in environments that are subject to SEC 17a-4, CFTC, and FINRA regulations. For more information about how Backup Vault Lock relates to these regulations, see the Cohasset Associates Compliance Assessment", + "Applies Backup Vault Lock to a backup vault, preventing attempts to delete any recovery point stored in or created in a backup vault. Vault Lock also prevents attempts to update the lifecycle policy that controls the retention period of any recovery point currently stored in a backup vault. If specified, Vault Lock enforces a minimum and maximum retention period for future backup and copy jobs that target a backup vault. Backup Vault Lock has been assessed by Cohasset Associates for use in environments that are subject to SEC 17a-4, CFTC, and FINRA regulations. For more information about how Backup Vault Lock relates to these regulations, see the Cohasset Associates Compliance Assessment. For more information, see Backup Vault Lock", options: [ { name: "--backup-vault-name", @@ -3831,7 +3821,7 @@ const completionSpec: Fig.Spec = { { name: "--min-retention-days", description: - "The Backup Vault Lock configuration that specifies the minimum retention period that the vault retains its recovery points. This setting can be useful if, for example, your organization's policies require you to retain certain data for at least seven years (2555 days). If this parameter is not specified, Vault Lock will not enforce a minimum retention period. If this parameter is specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or longer than the minimum retention period. If the job's retention period is shorter than that minimum retention period, then the vault fails that backup or copy job, and you should either modify your lifecycle settings or use a different vault. The shortest minimum retention period you can specify is 1 day. Recovery points already saved in the vault prior to Vault Lock are not affected", + "The Backup Vault Lock configuration that specifies the minimum retention period that the vault retains its recovery points. This setting can be useful if, for example, your organization's policies require you to retain certain data for at least seven years (2555 days). This parameter is required when a vault lock is created through CloudFormation; otherwise, this parameter is optional. If this parameter is not specified, Vault Lock will not enforce a minimum retention period. If this parameter is specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or longer than the minimum retention period. 
If the job's retention period is shorter than that minimum retention period, then the vault fails that backup or copy job, and you should either modify your lifecycle settings or use a different vault. The shortest minimum retention period you can specify is 1 day. Recovery points already saved in the vault prior to Vault Lock are not affected", args: { name: "long", }, @@ -3879,7 +3869,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -3895,7 +3885,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-events", description: - "An array of events that indicate the status of jobs to back up resources to the backup vault. For common use cases and code samples, see Using Amazon SNS to track Backup events. The following events are supported: BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED The list below shows items that are deprecated events (for reference) and are no longer in use. They are no longer supported and will not return statuses or notifications. Refer to the list above for current supported events", + "An array of events that indicate the status of jobs to back up resources to the backup vault. For common use cases and code samples, see Using Amazon SNS to track Backup events. The following events are supported: BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED The list below includes both supported events and deprecated events that are no longer in use (for reference). Deprecated events do not return statuses or notifications. Refer to the list above for the supported events", args: { name: "list", }, @@ -3934,7 +3924,7 @@ const completionSpec: Fig.Spec = { }, { name: "--validation-status", - description: "This is the status of your restore validation", + description: "The status of your restore validation", args: { name: "string", }, @@ -3973,7 +3963,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -4021,15 +4011,14 @@ const completionSpec: Fig.Spec = { { name: "--lifecycle", description: - 'The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. 
Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the \u201cretention\u201d setting must be 90 days greater than the \u201ctransition to cold after days\u201d setting. The \u201ctransition to cold after days\u201d setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the "Lifecycle to cold storage" section of the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days)', + "The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the \u201cretention\u201d setting must be 90 days greater than the \u201ctransition to cold after days\u201d setting. The \u201ctransition to cold after days\u201d setting cannot be changed after a backup has been transitioned to cold. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days)", args: { name: "structure", }, }, { name: "--recovery-point-tags", - description: - "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair", + description: "The tags to assign to the resources", args: { name: "map", }, @@ -4037,7 +4026,7 @@ const completionSpec: Fig.Spec = { { name: "--backup-options", description: - 'Specifies the backup option for a selected resource. This option is only available for Windows Volume Shadow Copy Service (VSS) backup jobs. Valid values: Set to "WindowsVSS":"enabled" to enable the WindowsVSS backup option and create a Windows VSS backup. Set to "WindowsVSS""disabled" to create a regular backup. The WindowsVSS option is not enabled by default', + 'The backup option for a selected resource. This option is only available for Windows Volume Shadow Copy Service (VSS) backup jobs. Valid values: Set to "WindowsVSS":"enabled" to enable the WindowsVSS backup option and create a Windows VSS backup. Set to "WindowsVSS""disabled" to create a regular backup. The WindowsVSS option is not enabled by default', args: { name: "map", }, @@ -4077,7 +4066,7 @@ const completionSpec: Fig.Spec = { { name: "--source-backup-vault-name", description: - "The name of a logical source container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical source container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -4085,7 +4074,7 @@ const completionSpec: Fig.Spec = { { name: "--destination-backup-vault-arn", description: - "An Amazon Resource Name (ARN) that uniquely identifies a destination backup vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault", + "An Amazon Resource Name (ARN) that uniquely identifies a destination backup vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault", args: { name: "string", }, @@ -4109,7 +4098,7 @@ const completionSpec: Fig.Spec = { { name: "--lifecycle", description: - 'Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, on the console, the \u201cretention\u201d setting must be 90 days greater than the \u201ctransition to cold after days\u201d setting. The \u201ctransition to cold after days\u201d setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the "Lifecycle to cold storage" section of the Feature availability by resource table. Backup ignores this expression for other resource types', + "Specifies the time period, in days, before a recovery point transitions to cold storage or is deleted. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, on the console, the retention setting must be 90 days greater than the transition to cold after days setting. The transition to cold after days setting can't be changed after a backup has been transitioned to cold. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. To remove the existing lifecycle and retention periods and keep your recovery points indefinitely, specify -1 for MoveToColdStorageAfterDays and DeleteAfterDays", args: { name: "structure", }, @@ -4188,7 +4177,7 @@ const completionSpec: Fig.Spec = { { name: "--metadata", description: - 'A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point. You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists. You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance: file-system-id: The ID of the Amazon EFS file system that is backed up by Backup. Returned in GetRecoveryPointRestoreMetadata. Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true. KmsKeyId: Specifies the Amazon Web Services KMS key that is used to encrypt the restored file system. You can specify a key from another Amazon Web Services account provided that key it is properly shared with your account via Amazon Web Services KMS. PerformanceMode: Specifies the throughput mode of the file system. 
CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request. newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system. ItemsToRestore: An array of one to five strings where each string is a file path. Use ItemsToRestore to restore specific files or directories rather than the entire file system. This parameter is optional. For example, "itemsToRestore":"[\\"/my.test\\"]"', + "A set of metadata key-value pairs. You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists. For more information about the metadata for each resource, see the following: Metadata for Amazon Aurora Metadata for Amazon DocumentDB Metadata for CloudFormation Metadata for Amazon DynamoDB Metadata for Amazon EBS Metadata for Amazon EC2 Metadata for Amazon EFS Metadata for Amazon FSx Metadata for Amazon Neptune Metadata for Amazon RDS Metadata for Amazon Redshift Metadata for Storage Gateway Metadata for Amazon S3 Metadata for Amazon Timestream Metadata for virtual machines", args: { name: "map", }, @@ -4212,7 +4201,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-type", description: - "Starts a job to restore a recovery point for one of the following resources: Aurora for Amazon Aurora DocumentDB for Amazon DocumentDB (with MongoDB compatibility) CloudFormation for CloudFormation DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database Service Redshift for Amazon Redshift Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for virtual machines", + "Starts a job to restore a recovery point for one of the following resources: Aurora - Amazon Aurora DocumentDB - Amazon DocumentDB CloudFormation - CloudFormation DynamoDB - Amazon DynamoDB EBS - Amazon Elastic Block Store EC2 - Amazon Elastic Compute Cloud EFS - Amazon Elastic File System FSx - Amazon FSx Neptune - Amazon Neptune RDS - Amazon Relational Database Service Redshift - Amazon Redshift Storage Gateway - Storage Gateway S3 - Amazon Simple Storage Service Timestream - Amazon Timestream VirtualMachine - Virtual machines", args: { name: "string", }, @@ -4249,7 +4238,7 @@ const completionSpec: Fig.Spec = { { name: "stop-backup-job", description: - "Attempts to cancel a job to create a one-time backup of a resource. This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune", + "Attempts to cancel a job to create a one-time backup of a resource. 
This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP, Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune",
       options: [
         {
           name: "--backup-job-id",
@@ -4281,12 +4270,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "tag-resource",
       description:
-        "Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN)",
+        "Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN). This API is supported for recovery points for resource types including Aurora, Amazon DocumentDB, Amazon EBS, Amazon FSx, Neptune, and Amazon RDS",
       options: [
         {
           name: "--resource-arn",
           description:
-            "An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource",
+            "An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource. ARNs that do not include backup are incompatible with tagging. TagResource and UntagResource with invalid ARNs will result in an error. Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look like arn:aws:ec2:us-east",
           args: {
             name: "string",
           },
@@ -4321,12 +4310,12 @@ const completionSpec: Fig.Spec = {
     {
       name: "untag-resource",
       description:
-        "Removes a set of key-value pairs from a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN)",
+        "Removes a set of key-value pairs from a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN). This API is not supported for recovery points for resource types including Aurora, Amazon DocumentDB, Amazon EBS, Amazon FSx, Neptune, and Amazon RDS",
       options: [
         {
           name: "--resource-arn",
           description:
-            "An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource",
+            "An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource. ARNs that do not include backup are incompatible with tagging. TagResource and UntagResource with invalid ARNs will result in an error. Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look like arn:aws:ec2:us-east",
           args: {
             name: "string",
           },
@@ -4334,7 +4323,7 @@ const completionSpec: Fig.Spec = {
       {
         name: "--tag-key-list",
         description:
-          "A list of keys to identify which key-value tags to remove from a resource",
+          "The keys to identify which key-value tags to remove from a resource",
         args: {
           name: "list",
         },
@@ -4361,11 +4350,11 @@ const completionSpec: Fig.Spec = {
     {
       name: "update-backup-plan",
       description:
-        "Updates an existing backup plan identified by its backupPlanId with the input document in JSON format. The new version is uniquely identified by a VersionId",
+        "Updates the specified backup plan. The new version is uniquely identified by its ID",
       options: [
         {
           name: "--backup-plan-id",
-          description: "Uniquely identifies a backup plan",
+          description: "The ID of the backup plan",
           args: {
             name: "string",
           },
@@ -4373,7 +4362,7 @@ const completionSpec: Fig.Spec = {
         {
           name: "--backup-plan",
           description:
-            "Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules",
+            "The body of a backup plan. 
Includes a BackupPlanName and one or more sets of Rules", args: { name: "structure", }, @@ -4399,8 +4388,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-framework", - description: - "Updates an existing framework identified by its FrameworkName with the input document in JSON format", + description: "Updates the specified framework", options: [ { name: "--framework-name", @@ -4421,7 +4409,7 @@ const completionSpec: Fig.Spec = { { name: "--framework-controls", description: - "A list of the controls that make up the framework. Each control in the list has a name, input parameters, and scope", + "The controls that make up the framework. Each control in the list has a name, input parameters, and scope", args: { name: "list", }, @@ -4488,12 +4476,12 @@ const completionSpec: Fig.Spec = { { name: "update-recovery-point-lifecycle", description: - 'Sets the transition lifecycle of a recovery point. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the \u201cretention\u201d setting must be 90 days greater than the \u201ctransition to cold after days\u201d setting. The \u201ctransition to cold after days\u201d setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the "Lifecycle to cold storage" section of the Feature availability by resource table. Backup ignores this expression for other resource types. This operation does not support continuous backups', + "Sets the transition lifecycle of a recovery point. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the \u201cretention\u201d setting must be 90 days greater than the \u201ctransition to cold after days\u201d setting. The \u201ctransition to cold after days\u201d setting cannot be changed after a backup has been transitioned to cold. If your lifecycle currently uses the parameters DeleteAfterDays and MoveToColdStorageAfterDays, include these parameters and their values when you call this operation. Not including them may result in your plan updating with null values. This operation does not support continuous backups", options: [ { name: "--backup-vault-name", description: - "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens", + "The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created", args: { name: "string", }, @@ -4575,8 +4563,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-report-plan", - description: - "Updates an existing report plan identified by its ReportPlanName with the input document in JSON format", + description: "Updates the specified report plan", options: [ { name: "--report-plan-name", @@ -4597,7 +4584,7 @@ const completionSpec: Fig.Spec = { { name: "--report-delivery-channel", description: - "A structure that contains information about where to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports", + "The information about where to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports", args: { name: "structure", }, @@ -4605,7 +4592,7 @@ const completionSpec: Fig.Spec = { { name: "--report-setting", description: - "Identifies the report template for the report. Reports are built using a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT If the report template is RESOURCE_COMPLIANCE_REPORT or CONTROL_COMPLIANCE_REPORT, this API resource also describes the report coverage by Amazon Web Services Regions and frameworks", + "The report template for the report. Reports are built using a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT If the report template is RESOURCE_COMPLIANCE_REPORT or CONTROL_COMPLIANCE_REPORT, this API resource also describes the report coverage by Amazon Web Services Regions and frameworks", args: { name: "structure", }, @@ -4651,8 +4638,7 @@ const completionSpec: Fig.Spec = { }, { name: "--restore-testing-plan-name", - description: - "This is the restore testing plan name you wish to update", + description: "The name of the restore testing plan name", args: { name: "string", }, @@ -4679,7 +4665,7 @@ const completionSpec: Fig.Spec = { { name: "update-restore-testing-selection", description: - "Most elements except the RestoreTestingSelectionName can be updated with this request. RestoreTestingSelection can use either protected resource ARNs or conditions, but not both. That is, if your selection has ProtectedResourceArns, requesting an update with the parameter ProtectedResourceConditions will be unsuccessful", + "Updates the specified restore testing selection. Most elements except the RestoreTestingSelectionName can be updated with this request. 
You can use either protected resource ARNs or conditions, but not both", options: [ { name: "--restore-testing-plan-name", @@ -4700,7 +4686,7 @@ const completionSpec: Fig.Spec = { { name: "--restore-testing-selection-name", description: - "This is the required restore testing selection name of the restore testing selection you wish to update", + "The required restore testing selection name of the restore testing selection you wish to update", args: { name: "string", }, diff --git a/src/aws/bedrock-agent-runtime.ts b/src/aws/bedrock-agent-runtime.ts index a727a62be86c..e4fd70bb7855 100644 --- a/src/aws/bedrock-agent-runtime.ts +++ b/src/aws/bedrock-agent-runtime.ts @@ -204,7 +204,7 @@ const completionSpec: Fig.Spec = { { name: "retrieve-and-generate", description: - "Queries a knowledge base and generates responses based on the retrieved results. The response only cites sources that are relevant to the query", + "Queries a knowledge base and generates responses based on the retrieved results and using the specified foundation model or inference profile. The response only cites sources that are relevant to the query", options: [ { name: "--input", diff --git a/src/aws/bedrock-agent.ts b/src/aws/bedrock-agent.ts index 706959618d8d..d1bc2721a95a 100644 --- a/src/aws/bedrock-agent.ts +++ b/src/aws/bedrock-agent.ts @@ -70,7 +70,7 @@ const completionSpec: Fig.Spec = { { name: "create-agent", description: - "Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers. Specify the following fields for security purposes. agentResourceRoleArn \u2013 The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent. (Optional) customerEncryptionKeyArn \u2013 The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds \u2013 Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session. To enable your agent to retain conversational context across multiple sessions, include a memoryConfiguration object. For more information, see Configure memory. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. If you agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot", + "Creates an agent that orchestrates interactions between foundation models, data sources, software applications, user conversations, and APIs to carry out tasks to help customers. Specify the following fields for security purposes. agentResourceRoleArn \u2013 The Amazon Resource Name (ARN) of the role with permissions to invoke API operations on an agent. (Optional) customerEncryptionKeyArn \u2013 The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds \u2013 Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeAgent request begins a new session. To enable your agent to retain conversational context across multiple sessions, include a memoryConfiguration object. For more information, see Configure memory. 
To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. If your agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot. The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled", options: [ { name: "--agent-name", @@ -371,7 +371,7 @@ const completionSpec: Fig.Spec = { { name: "--data-deletion-policy", description: - "The data deletion policy for the data source. You can set the data deletion policy to: DELETE: Deletes all underlying data belonging to the data source from the vector store upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted, only the underlying data. This flag is ignored if an Amazon Web Services account is deleted. RETAIN: Retains all underlying data in your vector store upon deletion of a knowledge base or data source resource", + "The data deletion policy for the data source. You can set the data deletion policy to: DELETE: Deletes all data from your data source that\u2019s converted into vector embeddings upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted, only the data. This flag is ignored if an Amazon Web Services account is deleted. RETAIN: Retains all data from your data source that\u2019s converted into vector embeddings upon deletion of a knowledge base or data source resource. Note that the vector store itself is not deleted if you delete a knowledge base or data source resource", args: { name: "string", }, @@ -1226,7 +1226,7 @@ const completionSpec: Fig.Spec = { { name: "delete-prompt", description: - "Deletes a prompt or a prompt version from the Prompt management tool. For more information, see Delete prompts from the Prompt management tool and Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User Guide", + "Deletes a prompt or a version of it, depending on whether you include the promptVersion field or not. For more information, see Delete prompts from the Prompt management tool and Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User Guide", options: [ { name: "--prompt-identifier", @@ -1237,7 +1237,8 @@ const completionSpec: Fig.Spec = { }, { name: "--prompt-version", - description: "The version of the prompt to delete", + description: + "The version of the prompt to delete. To delete the prompt, omit this field", args: { name: "string", }, @@ -1738,7 +1739,7 @@ const completionSpec: Fig.Spec = { { name: "get-prompt", description: - "Retrieves information about a prompt or a version of it. For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide", + "Retrieves information about the working draft (DRAFT version) of a prompt or a version of it, depending on whether you include the promptVersion field or not. 
For more information, see View information about prompts using Prompt management and View information about a version of your prompt in the Amazon Bedrock User Guide", options: [ { name: "--prompt-identifier", @@ -1750,7 +1751,7 @@ const completionSpec: Fig.Spec = { { name: "--prompt-version", description: - "The version of the prompt about which you want to retrieve information", + "The version of the prompt about which you want to retrieve information. Omit this field to return information about the working draft of the prompt", args: { name: "string", }, @@ -2578,7 +2579,7 @@ const completionSpec: Fig.Spec = { { name: "list-prompts", description: - "Returns a list of prompts from the Prompt management tool and information about each prompt. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide", + "Returns either information about the working draft (DRAFT version) of each prompt in an account, or information about of all versions of a prompt, depending on whether you include the promptIdentifier field or not. For more information, see View information about prompts using Prompt management in the Amazon Bedrock User Guide", options: [ { name: "--max-results", @@ -2598,7 +2599,8 @@ const completionSpec: Fig.Spec = { }, { name: "--prompt-identifier", - description: "The unique identifier of the prompt", + description: + "The unique identifier of the prompt for whose versions you want to return information. Omit this field to list information about all prompts in an account", args: { name: "string", }, @@ -3369,7 +3371,7 @@ const completionSpec: Fig.Spec = { }, { name: "--description", - description: "A description for the flow alias", + description: "A description for the alias", args: { name: "string", }, @@ -3383,7 +3385,7 @@ const completionSpec: Fig.Spec = { }, { name: "--name", - description: "The name of the flow alias", + description: "The name of the alias", args: { name: "string", }, diff --git a/src/aws/bedrock-runtime.ts b/src/aws/bedrock-runtime.ts index 695aebf0a95c..62c1b298ee97 100644 --- a/src/aws/bedrock-runtime.ts +++ b/src/aws/bedrock-runtime.ts @@ -66,7 +66,7 @@ const completionSpec: Fig.Spec = { { name: "--model-id", description: - "The identifier for the model that you want to call. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide", + "The identifier for the model that you want to call. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. 
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. The Converse API doesn't support imported models", args: { name: "string", }, @@ -176,7 +176,7 @@ const completionSpec: Fig.Spec = { { name: "--model-id", description: - "The unique identifier of the model to invoke to run inference. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide", + "The unique identifier of the model to invoke to run inference. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console", args: { name: "string", }, diff --git a/src/aws/bedrock.ts b/src/aws/bedrock.ts index e192502eda5b..b93f69fe9177 100644 --- a/src/aws/bedrock.ts +++ b/src/aws/bedrock.ts @@ -3,6 +3,37 @@ const completionSpec: Fig.Spec = { description: "Describes the API operations for creating, managing, fine-turning, and evaluating Amazon Bedrock models", subcommands: [ + { + name: "batch-delete-evaluation-job", + description: + "Creates a batch deletion job. A model evaluation job can only be deleted if it has following status FAILED, COMPLETED, and STOPPED. You can request up to 25 model evaluation jobs be deleted in a single request", + options: [ + { + name: "--job-identifiers", + description: "An array of model evaluation job ARNs to be deleted", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-evaluation-job", description: @@ -424,7 +455,7 @@ const completionSpec: Fig.Spec = { { name: "--vpc-config", description: - "VPC configuration (optional). Configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for this job", + "The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC", args: { name: "structure", }, @@ -448,10 +479,99 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-model-import-job", + description: + "Creates a model import job to import model that you have customized in other environments, such as Amazon SageMaker. For more information, see Import a customized model", + options: [ + { + name: "--job-name", + description: "The name of the import job", + args: { + name: "string", + }, + }, + { + name: "--imported-model-name", + description: "The name of the imported model", + args: { + name: "string", + }, + }, + { + name: "--role-arn", + description: "The Amazon Resource Name (ARN) of the model import job", + args: { + name: "string", + }, + }, + { + name: "--model-data-source", + description: "The data source for the imported model", + args: { + name: "structure", + }, + }, + { + name: "--job-tags", + description: "Tags to attach to this import job", + args: { + name: "list", + }, + }, + { + name: "--imported-model-tags", + description: "Tags to attach to the imported model", + args: { + name: "list", + }, + }, + { + name: "--client-request-token", + description: + "A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency", + args: { + name: "string", + }, + }, + { + name: "--vpc-config", + description: + "VPC configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for the import job", + args: { + name: "structure", + }, + }, + { + name: "--imported-model-kms-key-id", + description: "The imported model is encrypted at rest using this key", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-model-invocation-job", description: - "Creates a job to invoke a model on multiple prompts (batch inference). Format your data according to Format your inference data and upload it to an Amazon S3 bucket. For more information, see Create a batch inference job. The response returns a jobArn that you can use to stop or get details about the job. You can check the status of the job by sending a GetModelCustomizationJob request", + "Creates a batch inference job to invoke a model on multiple prompts. Format your data according to Format your inference data and upload it to an Amazon S3 bucket. For more information, see Process multiple prompts with batch inference. The response returns a jobArn that you can use to stop or get details about the job", options: [ { name: "--job-name", @@ -500,6 +620,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--vpc-config", + description: + "The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC", + args: { + name: "structure", + }, + }, { name: "--timeout-duration-in-hours", description: @@ -675,6 +803,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-imported-model", + description: + "Deletes a custom model that you imported earlier. For more information, see Import a customized model in the Amazon Bedrock User Guide", + options: [ + { + name: "--model-identifier", + description: "Name of the imported model to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-model-invocation-logging-configuration", description: "Delete the invocation logging", @@ -863,6 +1022,69 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-imported-model", + description: + "Gets properties associated with a customized model you imported", + options: [ + { + name: "--model-identifier", + description: + "Name or Amazon Resource Name (ARN) of the imported model", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-inference-profile", + description: + "Gets information about an inference profile. For more information, see the Amazon Bedrock User Guide", + options: [ + { + name: "--inference-profile-identifier", + description: "The unique identifier of the inference profile", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-model-copy-job", description: @@ -925,6 +1147,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-model-import-job", + description: + "Retrieves the properties associated with import model job, including the status of the job. For more information, see Import a customized model in the Amazon Bedrock User Guide", + options: [ + { + name: "--job-identifier", + description: "The identifier of the import job", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-model-invocation-job", description: @@ -1374,6 +1627,173 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-imported-models", + description: + "Returns a list of models you've imported. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide", + options: [ + { + name: "--creation-time-before", + description: + "Return imported models that created before the specified time", + args: { + name: "timestamp", + }, + }, + { + name: "--creation-time-after", + description: + "Return imported models that were created after the specified time", + args: { + name: "timestamp", + }, + }, + { + name: "--name-contains", + description: + "Return imported models only if the model name contains these characters", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results", + args: { + name: "string", + }, + }, + { + name: "--sort-by", + description: + "The field to sort by in the returned list of imported models", + args: { + name: "string", + }, + }, + { + name: "--sort-order", + description: + "Specifies whetehr to sort the results in ascending or descending order", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-inference-profiles", + description: "Returns a list of inference profiles that you can use", + options: [ + { + name: "--max-results", + description: + "The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-model-copy-jobs", description: @@ -1611,6 +2031,117 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-model-import-jobs", + description: + "Returns a list of import jobs you've submitted. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide", + options: [ + { + name: "--creation-time-after", + description: + "Return import jobs that were created after the specified time", + args: { + name: "timestamp", + }, + }, + { + name: "--creation-time-before", + description: + "Return import jobs that were created before the specified time", + args: { + name: "timestamp", + }, + }, + { + name: "--status-equals", + description: "Return imported jobs with the specified status", + args: { + name: "string", + }, + }, + { + name: "--name-contains", + description: + "Return imported jobs only if the job name contains these characters", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results", + args: { + name: "string", + }, + }, + { + name: "--sort-by", + description: + "The field to sort by in the returned list of imported jobs", + args: { + name: "string", + }, + }, + { + name: "--sort-order", + description: + "Specifies whether to sort the results in ascending or descending order", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-model-invocation-jobs", description: diff --git a/src/aws/chatbot.ts b/src/aws/chatbot.ts index 208c2a9f0352..0e3b3ce98812 100644 --- a/src/aws/chatbot.ts +++ b/src/aws/chatbot.ts @@ -1,22 +1,23 @@ const completionSpec: Fig.Spec = { name: "chatbot", - description: "AWS Chatbot API", + description: + "The AWS Chatbot API Reference provides descriptions, API request parameters, and the XML response for each of the AWS Chatbot API actions. AWS Chatbot APIs are currently available in the following Regions: US East (Ohio) - us-east-2 US West (Oregon) - us-west-2 Asia Pacific (Singapore) - ap-southeast-1 Europe (Ireland) - eu-west-1 The AWS Chatbot console can only be used in US East (Ohio). Your configuration data however, is stored in each of the relevant available Regions. Your AWS CloudTrail events are logged in whatever Region you call from, not US East (N. Virginia) by default", subcommands: [ { name: "create-chime-webhook-configuration", - description: "Creates Chime Webhook Configuration", + description: "Creates an AWS Chatbot configuration for Amazon Chime", options: [ { name: "--webhook-description", description: - "Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html", + "A description of the webhook. We recommend using the convention RoomName/WebhookName. For more information, see Tutorial: Get started with Amazon Chime in the AWS Chatbot Administrator Guide", args: { name: "string", }, }, { name: "--webhook-url", - description: "URL for the Chime webhook", + description: "The URL for the Amazon Chime webhook", args: { name: "string", }, @@ -24,7 +25,7 @@ const completionSpec: Fig.Spec = { { name: "--sns-topic-arns", description: - "The ARNs of the SNS topics that deliver notifications to AWS Chatbot", + "The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot", args: { name: "list", }, @@ -32,7 +33,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. 
For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -53,7 +54,8 @@ const completionSpec: Fig.Spec = { }, { name: "--tags", - description: "A list of tags to apply to the configuration", + description: + "A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs", args: { name: "list", }, @@ -79,7 +81,7 @@ const completionSpec: Fig.Spec = { }, { name: "create-microsoft-teams-channel-configuration", - description: "Creates MS Teams Channel Configuration", + description: "Creates an AWS Chatbot configuration for Microsoft Teams", options: [ { name: "--channel-id", @@ -98,7 +100,7 @@ const completionSpec: Fig.Spec = { { name: "--team-id", description: - "The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide", + "The ID of the Microsoft Teams authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -120,7 +122,7 @@ const completionSpec: Fig.Spec = { { name: "--sns-topic-arns", description: - "The ARNs of the SNS topics that deliver notifications to AWS Chatbot", + "The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot", args: { name: "list", }, @@ -128,7 +130,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -150,7 +152,7 @@ const completionSpec: Fig.Spec = { { name: "--guardrail-policy-arns", description: - "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set", + "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not set", args: { name: "list", }, @@ -167,7 +169,8 @@ const completionSpec: Fig.Spec = { }, { name: "--tags", - description: "A list of tags to apply to the configuration", + description: + "A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs", args: { name: "list", }, @@ -193,7 +196,7 @@ const completionSpec: Fig.Spec = { }, { name: "create-slack-channel-configuration", - description: "Creates Slack Channel Configuration", + description: "Creates an AWS Chatbot confugration for Slack", options: [ { name: "--slack-team-id", @@ -206,14 +209,14 @@ const completionSpec: Fig.Spec = { { name: "--slack-channel-id", description: - "The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. 
The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ", + "The ID of the Slack channel. To get this ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ", args: { name: "string", }, }, { name: "--slack-channel-name", - description: "The name of the Slack Channel", + description: "The name of the Slack channel", args: { name: "string", }, @@ -221,7 +224,7 @@ const completionSpec: Fig.Spec = { { name: "--sns-topic-arns", description: - "The ARNs of the SNS topics that deliver notifications to AWS Chatbot", + "The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot", args: { name: "list", }, @@ -229,7 +232,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -251,7 +254,7 @@ const completionSpec: Fig.Spec = { { name: "--guardrail-policy-arns", description: - "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set", + "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not set", args: { name: "list", }, @@ -268,7 +271,8 @@ const completionSpec: Fig.Spec = { }, { name: "--tags", - description: "A list of tags to apply to the configuration", + description: + "A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs", args: { name: "list", }, @@ -294,11 +298,13 @@ const completionSpec: Fig.Spec = { }, { name: "delete-chime-webhook-configuration", - description: "Deletes a Chime Webhook Configuration", + description: + "Deletes a Amazon Chime webhook configuration for AWS Chatbot", options: [ { name: "--chat-configuration-arn", - description: "The ARN of the ChimeWebhookConfiguration to delete", + description: + "The Amazon Resource Name (ARN) of the ChimeWebhookConfiguration to delete", args: { name: "string", }, @@ -324,12 +330,13 @@ const completionSpec: Fig.Spec = { }, { name: "delete-microsoft-teams-channel-configuration", - description: "Deletes MS Teams Channel Configuration", + description: + "Deletes a Microsoft Teams channel configuration for AWS Chatbot", options: [ { name: "--chat-configuration-arn", description: - "The ARN of the MicrosoftTeamsChannelConfiguration to delete", + "The Amazon Resource Name (ARN) of the MicrosoftTeamsChannelConfiguration associated with the user identity to delete", args: { name: "string", }, @@ -361,7 +368,7 @@ const completionSpec: Fig.Spec = { { name: "--team-id", description: - "The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide", + "The ID of the Microsoft Teams team authorized with AWS Chatbot. 
To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -387,7 +394,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-microsoft-teams-user-identity", - description: "Deletes a Teams user identity", + description: + "Identifes a user level permission for a channel configuration", options: [ { name: "--chat-configuration-arn", @@ -399,7 +407,7 @@ const completionSpec: Fig.Spec = { }, { name: "--user-id", - description: "Id from Microsoft Teams for user", + description: "The Microsoft Teams user ID", args: { name: "string", }, @@ -425,11 +433,12 @@ const completionSpec: Fig.Spec = { }, { name: "delete-slack-channel-configuration", - description: "Deletes Slack Channel Configuration", + description: "Deletes a Slack channel configuration for AWS Chatbot", options: [ { name: "--chat-configuration-arn", - description: "The ARN of the SlackChannelConfiguration to delete", + description: + "The Amazon Resource Name (ARN) of the SlackChannelConfiguration to delete", args: { name: "string", }, @@ -455,7 +464,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-slack-user-identity", - description: "Deletes a Slack user identity", + description: + "Deletes a user level permission for a Slack channel configuration", options: [ { name: "--chat-configuration-arn", @@ -534,7 +544,7 @@ const completionSpec: Fig.Spec = { { name: "describe-chime-webhook-configurations", description: - "Lists Chime Webhook Configurations optionally filtered by ChatConfigurationArn", + "Lists Amazon Chime webhook configurations optionally filtered by ChatConfigurationArn", options: [ { name: "--max-results", @@ -555,7 +565,7 @@ const completionSpec: Fig.Spec = { { name: "--chat-configuration-arn", description: - "An optional ARN of a ChimeWebhookConfiguration to describe", + "An optional Amazon Resource Number (ARN) of a ChimeWebhookConfiguration to describe", args: { name: "string", }, @@ -568,6 +578,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -582,7 +616,7 @@ const completionSpec: Fig.Spec = { { name: "describe-slack-channel-configurations", description: - "Lists Slack Channel Configurations optionally filtered by ChatConfigurationArn", + "Lists Slack channel configurations optionally filtered by ChatConfigurationArn", options: [ { name: "--max-results", @@ -603,7 +637,7 @@ const completionSpec: Fig.Spec = { { name: "--chat-configuration-arn", description: - "An optional ARN of a SlackChannelConfiguration to describe", + "An optional Amazon Resource Number (ARN) of a SlackChannelConfiguration to describe", args: { name: "string", }, @@ -616,6 +650,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -634,7 +692,7 @@ const completionSpec: Fig.Spec = { { name: "--chat-configuration-arn", description: - "The ARN of the SlackChannelConfiguration associated with the user identities to describe", + "The Amazon Resource Number (ARN) of the SlackChannelConfiguration associated with the user identities to describe", args: { name: "string", }, @@ -663,6 +721,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -676,7 +758,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-slack-workspaces", - description: "Lists all authorized Slack Workspaces for AWS Account", + description: + "List all authorized Slack workspaces connected to the AWS Account onboarded with AWS Chatbot", options: [ { name: "--max-results", @@ -702,6 +785,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -715,7 +822,7 @@ const completionSpec: Fig.Spec = { }, { name: "get-account-preferences", - description: "Get Chatbot account level preferences", + description: "Returns AWS Chatbot account preferences", options: [ { name: "--cli-input-json", @@ -738,12 +845,13 @@ const completionSpec: Fig.Spec = { }, { name: "get-microsoft-teams-channel-configuration", - description: "Get a single MS Teams Channel Configurations", + description: + "Returns a Microsoft Teams channel configuration in an AWS account", options: [ { name: "--chat-configuration-arn", description: - "The ARN of the MicrosoftTeamsChannelConfiguration to retrieve", + "The Amazon Resource Number (ARN) of the MicrosoftTeamsChannelConfiguration to retrieve", args: { name: "string", }, @@ -770,7 +878,7 @@ const completionSpec: Fig.Spec = { { name: "list-microsoft-teams-channel-configurations", description: - "Lists MS Teams Channel Configurations optionally filtered by TeamId", + "Lists all AWS Chatbot Microsoft Teams channel configurations in an AWS account", options: [ { name: "--max-results", @@ -791,7 +899,7 @@ const completionSpec: Fig.Spec = { { name: "--team-id", description: - "The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide", + "The ID of the Microsoft Teams authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -804,6 +912,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -817,7 +949,7 @@ const completionSpec: Fig.Spec = { }, { name: "list-microsoft-teams-configured-teams", - description: "Lists all authorized MS teams for AWS Account", + description: "Lists all authorized Microsoft Teams for an AWS Account", options: [ { name: "--max-results", @@ -843,6 +975,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -857,12 +1013,12 @@ const completionSpec: Fig.Spec = { { name: "list-microsoft-teams-user-identities", description: - "Lists all Microsoft Teams user identities with a mapped role", + "A list all Microsoft Teams user identities with a mapped role", options: [ { name: "--chat-configuration-arn", description: - "The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identities to list", + "The Amazon Resource Number (ARN) of the MicrosoftTeamsChannelConfiguration associated with the user identities to list", args: { name: "string", }, @@ -891,6 +1047,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -904,11 +1084,12 @@ const completionSpec: Fig.Spec = { }, { name: "list-tags-for-resource", - description: "Retrieves the list of tags applied to a configuration", + description: + "Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The resource can be a user, server, or role", options: [ { name: "--resource-arn", - description: "The ARN of the configuration", + description: "The ARN you specified to list the tags of", args: { name: "string", }, @@ -934,7 +1115,8 @@ const completionSpec: Fig.Spec = { }, { name: "tag-resource", - description: "Applies the supplied tags to a configuration", + description: + "Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities", options: [ { name: "--resource-arn", @@ -971,18 +1153,21 @@ const completionSpec: Fig.Spec = { }, { name: "untag-resource", - description: "Removes the supplied tags from a configuration", + description: + "Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities", options: [ { name: "--resource-arn", - description: "The ARN of the configuration", + description: + "The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role", args: { name: "string", }, }, { name: "--tag-keys", - description: "A list of tag keys to remove from the configuration", + description: + "TagKeys are key-value pairs assigned to ARNs that can be used to group and search for resources by type. This metadata can be attached to resources for any purpose", args: { name: "list", }, @@ -1008,7 +1193,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-account-preferences", - description: "Update Chatbot account level preferences", + description: "Updates AWS Chatbot account preferences", options: [ { name: "--user-authorization-required", @@ -1023,12 +1208,12 @@ const completionSpec: Fig.Spec = { { name: "--training-data-collection-enabled", description: - "Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. 
AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot\u2019s AI technologies", + "Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AI technologies for AWS Chatbot", }, { name: "--no-training-data-collection-enabled", description: - "Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot\u2019s AI technologies", + "Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AI technologies for AWS Chatbot", }, { name: "--cli-input-json", @@ -1051,11 +1236,12 @@ const completionSpec: Fig.Spec = { }, { name: "update-chime-webhook-configuration", - description: "Updates a Chime Webhook Configuration", + description: "Updates a Amazon Chime webhook configuration", options: [ { name: "--chat-configuration-arn", - description: "The ARN of the ChimeWebhookConfiguration to update", + description: + "The Amazon Resource Number (ARN) of the ChimeWebhookConfiguration to update", args: { name: "string", }, @@ -1063,14 +1249,14 @@ const completionSpec: Fig.Spec = { { name: "--webhook-description", description: - "Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html", + "A description of the webhook. We recommend using the convention RoomName/WebhookName. For more information, see Tutorial: Get started with Amazon Chime in the AWS Chatbot Administrator Guide", args: { name: "string", }, }, { name: "--webhook-url", - description: "URL for the Chime webhook", + description: "The URL for the Amazon Chime webhook", args: { name: "string", }, @@ -1086,7 +1272,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -1119,12 +1305,12 @@ const completionSpec: Fig.Spec = { }, { name: "update-microsoft-teams-channel-configuration", - description: "Updates MS Teams Channel Configuration", + description: "Updates an Microsoft Teams channel configuration", options: [ { name: "--chat-configuration-arn", description: - "The ARN of the MicrosoftTeamsChannelConfiguration to update", + "The Amazon Resource Number (ARN) of the TeamsChannelConfiguration to update", args: { name: "string", }, @@ -1146,7 +1332,7 @@ const completionSpec: Fig.Spec = { { name: "--sns-topic-arns", description: - "The ARNs of the SNS topics that deliver notifications to AWS Chatbot", + "The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot", args: { name: "list", }, @@ -1154,7 +1340,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -1169,7 +1355,7 @@ const completionSpec: Fig.Spec = { { name: "--guardrail-policy-arns", description: - "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set", + "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not set", args: { name: "list", }, @@ -1205,11 +1391,12 @@ const completionSpec: Fig.Spec = { }, { name: "update-slack-channel-configuration", - description: "Updates Slack Channel Configuration", + description: "Updates a Slack channel configuration", options: [ { name: "--chat-configuration-arn", - description: "The ARN of the SlackChannelConfiguration to update", + description: + "The Amazon Resource Number (ARN) of the SlackChannelConfiguration to update", args: { name: "string", }, @@ -1217,14 +1404,14 @@ const completionSpec: Fig.Spec = { { name: "--slack-channel-id", description: - "The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ", + "The ID of the Slack channel. To get this ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ", args: { name: "string", }, }, { name: "--slack-channel-name", - description: "The name of the Slack Channel", + description: "The name of the Slack channel", args: { name: "string", }, @@ -1232,7 +1419,7 @@ const completionSpec: Fig.Spec = { { name: "--sns-topic-arns", description: - "The ARNs of the SNS topics that deliver notifications to AWS Chatbot", + "The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot", args: { name: "list", }, @@ -1240,7 +1427,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role-arn", description: - "The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot", + "A user-defined role that AWS Chatbot assumes. This is not the service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS Chatbot Administrator Guide", args: { name: "string", }, @@ -1255,7 +1442,7 @@ const completionSpec: Fig.Spec = { { name: "--guardrail-policy-arns", description: - "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set", + "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not set", args: { name: "list", }, diff --git a/src/aws/chime-sdk-voice.ts b/src/aws/chime-sdk-voice.ts index 559faae5118c..4221c4c2c828 100644 --- a/src/aws/chime-sdk-voice.ts +++ b/src/aws/chime-sdk-voice.ts @@ -1837,7 +1837,7 @@ const completionSpec: Fig.Spec = { }, { name: "--voice-tone-analysis-task-id", - description: "The ID of the voice tone anlysis task", + description: "The ID of the voice tone analysis task", args: { name: "string", }, @@ -3713,7 +3713,8 @@ const completionSpec: Fig.Spec = { }, { name: "--country", - description: "The country in the address being validated", + description: + "The country in the address being validated as two-letter country code in ISO 3166-1 alpha-2 format, such as US. For more information, see ISO 3166-1 alpha-2 in Wikipedia", args: { name: "string", }, diff --git a/src/aws/codebuild.ts b/src/aws/codebuild.ts index 2d106c8ff117..e6d3bb716fcf 100644 --- a/src/aws/codebuild.ts +++ b/src/aws/codebuild.ts @@ -239,7 +239,7 @@ const completionSpec: Fig.Spec = { { name: "--environment-type", description: - "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). 
The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", + "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", args: { name: "string", }, @@ -2872,7 +2872,7 @@ const completionSpec: Fig.Spec = { { name: "--environment-type", description: - "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", + "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", args: { name: "string", }, diff --git a/src/aws/codepipeline.ts b/src/aws/codepipeline.ts index bed36f4099f8..f8e3c6f624dc 100644 --- a/src/aws/codepipeline.ts +++ b/src/aws/codepipeline.ts @@ -1174,7 +1174,7 @@ const completionSpec: Fig.Spec = { { name: "list-webhooks", description: - "Gets a listing of all the webhooks in this Amazon Web Services Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook", + "Gets a listing of all the webhooks in this Amazon Web Services Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook. If a secret token was provided, it will be redacted in the response", options: [ { name: "--next-token", @@ -1706,7 +1706,7 @@ const completionSpec: Fig.Spec = { { name: "put-webhook", description: - "Defines a webhook and returns a unique webhook URL generated by CodePipeline. This URL can be supplied to third party source hosting providers to call every time there's a code change. When CodePipeline receives a POST request on this URL, the pipeline defined in the webhook is started as long as the POST request satisfied the authentication and filtering requirements supplied when defining the webhook. 
RegisterWebhookWithThirdParty and DeregisterWebhookWithThirdParty APIs can be used to automatically configure supported third parties to call the generated webhook URL", + "Defines a webhook and returns a unique webhook URL generated by CodePipeline. This URL can be supplied to third party source hosting providers to call every time there's a code change. When CodePipeline receives a POST request on this URL, the pipeline defined in the webhook is started as long as the POST request satisfied the authentication and filtering requirements supplied when defining the webhook. RegisterWebhookWithThirdParty and DeregisterWebhookWithThirdParty APIs can be used to automatically configure supported third parties to call the generated webhook URL. When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. If a secret token was provided, it will be redacted in the response", options: [ { name: "--webhook", diff --git a/src/aws/cognito-idp.ts b/src/aws/cognito-idp.ts index 8f9925feb04e..7e851371e478 100644 --- a/src/aws/cognito-idp.ts +++ b/src/aws/cognito-idp.ts @@ -141,7 +141,7 @@ const completionSpec: Fig.Spec = { { name: "admin-create-user", description: - "Creates a new user in the specified user pool. If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS). This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "Creates a new user in the specified user pool. If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS). This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -577,7 +577,7 @@ const completionSpec: Fig.Spec = { { name: "admin-initiate-auth", description: - "Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -958,7 +958,7 @@ const completionSpec: Fig.Spec = { { name: "admin-reset-user-password", description: - "Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -1006,7 +1006,7 @@ const completionSpec: Fig.Spec = { { name: "admin-respond-to-auth-challenge", description: - "Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. 
Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. An AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -1033,7 +1033,7 @@ const completionSpec: Fig.Spec = { { name: "--challenge-responses", description: - 'The responses to the challenge that you received in the previous request. Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. 
SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[SMS_code]", "USERNAME": "[username]"} PASSWORD_VERIFIER "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren\'t required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can\'t modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes. SOFTWARE_TOKEN_MFA "ChallengeName": "SOFTWARE_TOKEN_MFA", "ChallengeResponses": {"USERNAME": "[username]", "SOFTWARE_TOKEN_MFA_CODE": [authenticator_code]} DEVICE_SRP_AUTH "ChallengeName": "DEVICE_SRP_AUTH", "ChallengeResponses": {"USERNAME": "[username]", "DEVICE_KEY": "[device_key]", "SRP_A": "[srp_a]"} DEVICE_PASSWORD_VERIFIER "ChallengeName": "DEVICE_PASSWORD_VERIFIER", "ChallengeResponses": {"DEVICE_KEY": "[device_key]", "PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} MFA_SETUP "ChallengeName": "MFA_SETUP", "ChallengeResponses": {"USERNAME": "[username]"}, "SESSION": "[Session ID from VerifySoftwareToken]" SELECT_MFA_TYPE "ChallengeName": "SELECT_MFA_TYPE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[SMS_MFA or SOFTWARE_TOKEN_MFA]"} For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool', + 'The responses to the challenge that you received in the previous request. Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[code]", "USERNAME": "[username]"} EMAIL_OTP "ChallengeName": "EMAIL_OTP", "ChallengeResponses": {"EMAIL_OTP_CODE": "[code]", "USERNAME": "[username]"} PASSWORD_VERIFIER This challenge response is part of the SRP flow. Amazon Cognito requires that your application respond to this challenge within a few seconds. When the response time exceeds this period, your user pool returns a NotAuthorizedException error. "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. 
CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren\'t required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can\'t modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes. SOFTWARE_TOKEN_MFA "ChallengeName": "SOFTWARE_TOKEN_MFA", "ChallengeResponses": {"USERNAME": "[username]", "SOFTWARE_TOKEN_MFA_CODE": [authenticator_code]} DEVICE_SRP_AUTH "ChallengeName": "DEVICE_SRP_AUTH", "ChallengeResponses": {"USERNAME": "[username]", "DEVICE_KEY": "[device_key]", "SRP_A": "[srp_a]"} DEVICE_PASSWORD_VERIFIER "ChallengeName": "DEVICE_PASSWORD_VERIFIER", "ChallengeResponses": {"DEVICE_KEY": "[device_key]", "PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} MFA_SETUP "ChallengeName": "MFA_SETUP", "ChallengeResponses": {"USERNAME": "[username]"}, "SESSION": "[Session ID from VerifySoftwareToken]" SELECT_MFA_TYPE "ChallengeName": "SELECT_MFA_TYPE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[SMS_MFA or SOFTWARE_TOKEN_MFA]"} For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool', args: { name: "map", }, @@ -1092,11 +1092,12 @@ const completionSpec: Fig.Spec = { { name: "admin-set-user-mfa-preference", description: - "The user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--sms-mfa-settings", - description: "The SMS text message MFA settings", + description: + "User preferences for SMS message MFA. Activates or deactivates SMS MFA and sets it as the preferred MFA method when multiple methods are available", args: { name: "structure", }, @@ -1104,7 +1105,15 @@ const completionSpec: Fig.Spec = { { name: "--software-token-mfa-settings", description: - "The time-based one-time password software token MFA settings", + "User preferences for time-based one-time password (TOTP) MFA. Activates or deactivates TOTP MFA and sets it as the preferred MFA method when multiple methods are available", + args: { + name: "structure", + }, + }, + { + name: "--email-mfa-settings", + description: + "User preferences for email message MFA. Activates or deactivates email MFA and sets it as the preferred MFA method when multiple methods are available. To activate this setting, advanced security features must be active in your user pool", args: { name: "structure", }, @@ -1119,7 +1128,8 @@ const completionSpec: Fig.Spec = { }, { name: "--user-pool-id", - description: "The user pool ID", + description: + "The ID of the user pool where you want to set a user's MFA preferences", args: { name: "string", }, @@ -1359,7 +1369,7 @@ const completionSpec: Fig.Spec = { { name: "admin-update-user-attributes", description: - "This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. 
Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -1998,7 +2008,7 @@ const completionSpec: Fig.Spec = { { name: "create-user-pool", description: - "This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--pool-name", @@ -2264,7 +2274,7 @@ const completionSpec: Fig.Spec = { { name: "--read-attributes", description: - "The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes", + "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes", args: { name: "list", }, @@ -3040,7 +3050,7 @@ const completionSpec: Fig.Spec = { { name: "forgot-password", description: - "Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. 
To use the confirmation code for resetting the password, call ConfirmForgotPassword. If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword. If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--client-id", @@ -3389,7 +3399,7 @@ const completionSpec: Fig.Spec = { { name: "get-user-attribute-verification-code", description: - "Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
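As an illustrative sketch of the self-service reset flow that the forgot-password description above outlines, the two calls below request a confirmation code and then confirm the new password. The client ID, username, code, and password are placeholders, and a SECRET_HASH would also be needed if the app client has a secret.

import {
  CognitoIdentityProviderClient,
  ForgotPasswordCommand,
  ConfirmForgotPasswordCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-west-2" });

// Step 1: send the confirmation code via the channel chosen by AccountRecoverySetting.
await client.send(
  new ForgotPasswordCommand({ ClientId: "CLIENT_ID_PLACEHOLDER", Username: "jdoe" })
);

// Step 2: the user returns the code together with the new password.
await client.send(
  new ConfirmForgotPasswordCommand({
    ClientId: "CLIENT_ID_PLACEHOLDER",
    Username: "jdoe",
    ConfirmationCode: "123456",
    Password: "NewPassword-Placeholder-1!",
  })
);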
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--access-token", @@ -3500,7 +3510,7 @@ const completionSpec: Fig.Spec = { { name: "initiate-auth", description: - "Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. 
After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--auth-flow", @@ -4207,7 +4217,7 @@ const completionSpec: Fig.Spec = { { name: "resend-confirmation-code", description: - "Resends the confirmation (for confirmation of registration) to a specific user in the user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Resends the confirmation (for confirmation of registration) to a specific user in the user pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. 
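A minimal sketch of the initiate-auth entry point described above, assuming an app client without a secret and with the USER_PASSWORD_AUTH flow enabled; all literals are placeholders. The response carries either tokens or a ChallengeName plus Session for the next step.

import {
  CognitoIdentityProviderClient,
  InitiateAuthCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-west-2" });

const auth = await client.send(
  new InitiateAuthCommand({
    ClientId: "CLIENT_ID_PLACEHOLDER",
    AuthFlow: "USER_PASSWORD_AUTH",
    AuthParameters: { USERNAME: "jdoe", PASSWORD: "Password-Placeholder-1!" },
  })
);

// Either auth.AuthenticationResult holds tokens, or auth.ChallengeName and auth.Session
// describe the challenge to answer with RespondToAuthChallenge.
console.log(auth.ChallengeName ?? "signed in");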
For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--client-id", @@ -4278,7 +4288,7 @@ const completionSpec: Fig.Spec = { { name: "respond-to-auth-challenge", description: - "Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge. For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--client-id", @@ -4306,7 +4316,7 @@ const completionSpec: Fig.Spec = { { name: "--challenge-responses", description: - 'The responses to the challenge that you received in the previous request. Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[SMS_code]", "USERNAME": "[username]"} PASSWORD_VERIFIER "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren\'t required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can\'t modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes. SOFTWARE_TOKEN_MFA "ChallengeName": "SOFTWARE_TOKEN_MFA", "ChallengeResponses": {"USERNAME": "[username]", "SOFTWARE_TOKEN_MFA_CODE": [authenticator_code]} DEVICE_SRP_AUTH "ChallengeName": "DEVICE_SRP_AUTH", "ChallengeResponses": {"USERNAME": "[username]", "DEVICE_KEY": "[device_key]", "SRP_A": "[srp_a]"} DEVICE_PASSWORD_VERIFIER "ChallengeName": "DEVICE_PASSWORD_VERIFIER", "ChallengeResponses": {"DEVICE_KEY": "[device_key]", "PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} MFA_SETUP "ChallengeName": "MFA_SETUP", "ChallengeResponses": {"USERNAME": "[username]"}, "SESSION": "[Session ID from VerifySoftwareToken]" SELECT_MFA_TYPE "ChallengeName": "SELECT_MFA_TYPE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[SMS_MFA or SOFTWARE_TOKEN_MFA]"} For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool', + 'The responses to the challenge that you received in the previous request. Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. 
You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[code]", "USERNAME": "[username]"} EMAIL_OTP "ChallengeName": "EMAIL_OTP", "ChallengeResponses": {"EMAIL_OTP_CODE": "[code]", "USERNAME": "[username]"} PASSWORD_VERIFIER This challenge response is part of the SRP flow. Amazon Cognito requires that your application respond to this challenge within a few seconds. When the response time exceeds this period, your user pool returns a NotAuthorizedException error. "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren\'t required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can\'t modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes. SOFTWARE_TOKEN_MFA "ChallengeName": "SOFTWARE_TOKEN_MFA", "ChallengeResponses": {"USERNAME": "[username]", "SOFTWARE_TOKEN_MFA_CODE": [authenticator_code]} DEVICE_SRP_AUTH "ChallengeName": "DEVICE_SRP_AUTH", "ChallengeResponses": {"USERNAME": "[username]", "DEVICE_KEY": "[device_key]", "SRP_A": "[srp_a]"} DEVICE_PASSWORD_VERIFIER "ChallengeName": "DEVICE_PASSWORD_VERIFIER", "ChallengeResponses": {"DEVICE_KEY": "[device_key]", "PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} MFA_SETUP "ChallengeName": "MFA_SETUP", "ChallengeResponses": {"USERNAME": "[username]"}, "SESSION": "[Session ID from VerifySoftwareToken]" SELECT_MFA_TYPE "ChallengeName": "SELECT_MFA_TYPE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[SMS_MFA or SOFTWARE_TOKEN_MFA]"} For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool', args: { name: "map", }, @@ -4560,7 +4570,7 @@ const completionSpec: Fig.Spec = { { name: "--sms-mfa-settings", description: - "The SMS text message multi-factor authentication (MFA) settings", + "User preferences for SMS message MFA. Activates or deactivates SMS MFA and sets it as the preferred MFA method when multiple methods are available", args: { name: "structure", }, @@ -4568,7 +4578,15 @@ const completionSpec: Fig.Spec = { { name: "--software-token-mfa-settings", description: - "The time-based one-time password (TOTP) software token MFA settings", + "User preferences for time-based one-time password (TOTP) MFA. 
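Continuing that sketch, one way to answer the EMAIL_OTP challenge shown in the partial JSON bodies above. It assumes an SDK release that includes the EMAIL_OTP challenge name; the session value comes from the InitiateAuth response, all literals are placeholders, and SECRET_HASH would be added for an app client with a secret.

import {
  CognitoIdentityProviderClient,
  RespondToAuthChallengeCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-west-2" });

const result = await client.send(
  new RespondToAuthChallengeCommand({
    ClientId: "CLIENT_ID_PLACEHOLDER",
    ChallengeName: "EMAIL_OTP",
    Session: "SESSION_FROM_INITIATE_AUTH",
    ChallengeResponses: {
      USERNAME: "jdoe",
      EMAIL_OTP_CODE: "123456",
    },
  })
);

// On success the response contains an AuthenticationResult with the user's tokens.
console.log(Boolean(result.AuthenticationResult));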
Activates or deactivates TOTP MFA and sets it as the preferred MFA method when multiple methods are available", + args: { + name: "structure", + }, + }, + { + name: "--email-mfa-settings", + description: + "User preferences for email message MFA. Activates or deactivates email MFA and sets it as the preferred MFA method when multiple methods are available. To activate this setting, advanced security features must be active in your user pool", args: { name: "structure", }, @@ -4603,7 +4621,7 @@ const completionSpec: Fig.Spec = { { name: "set-user-pool-mfa-config", description: - "Sets the user pool multi-factor authentication (MFA) configuration. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Sets the user pool multi-factor authentication (MFA) configuration. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--user-pool-id", @@ -4614,14 +4632,24 @@ const completionSpec: Fig.Spec = { }, { name: "--sms-mfa-configuration", - description: "The SMS text message MFA configuration", + description: + "Configures user pool SMS messages for MFA. Sets the message template and the SMS message sending configuration for Amazon SNS", args: { name: "structure", }, }, { name: "--software-token-mfa-configuration", - description: "The software token MFA configuration", + description: + "Configures a user pool for time-based one-time password (TOTP) MFA. Enables or disables TOTP", + args: { + name: "structure", + }, + }, + { + name: "--email-mfa-configuration", + description: + "Configures user pool email messages for MFA. Sets the subject and body of the email message template for MFA messages. 
To activate this setting, advanced security features must be active in your user pool", args: { name: "structure", }, @@ -4696,7 +4724,7 @@ const completionSpec: Fig.Spec = { { name: "sign-up", description: - "Registers the user in the specified user pool and creates a user name, password, and user attributes. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "Registers the user in the specified user pool and creates a user name, password, and user attributes. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--client-id", @@ -5225,7 +5253,7 @@ const completionSpec: Fig.Spec = { { name: "update-user-attributes", description: - "With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. 
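A hedged sketch of the set-user-pool-mfa-config options documented above, assuming advanced security features are already active in the pool (required for email MFA) and an SDK release that includes EmailMfaConfiguration; the pool ID, subject, and message text are placeholders.

import {
  CognitoIdentityProviderClient,
  SetUserPoolMfaConfigCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-west-2" });

await client.send(
  new SetUserPoolMfaConfigCommand({
    UserPoolId: "us-west-2_EXAMPLE",
    MfaConfiguration: "OPTIONAL",
    SoftwareTokenMfaConfiguration: { Enabled: true },
    EmailMfaConfiguration: {
      Subject: "Your sign-in code",
      Message: "Your verification code is {####}",
    },
  })
);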
Custom attribute values in this request must include the custom: prefix. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", + "With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide", options: [ { name: "--user-attributes", @@ -5273,7 +5301,7 @@ const completionSpec: Fig.Spec = { { name: "update-user-pool", description: - "This action might generate an SMS text message. 
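For the update-user-attributes behavior described above (token-authorized, blank value deletes, custom: prefix for custom attributes), a small sketch; the access token and the custom attribute name are placeholders.

import {
  CognitoIdentityProviderClient,
  UpdateUserAttributesCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-west-2" });

await client.send(
  new UpdateUserAttributesCommand({
    AccessToken: "ACCESS_TOKEN_PLACEHOLDER", // must include the aws.cognito.signin.user.admin scope
    UserAttributes: [
      { Name: "nickname", Value: "JD" },
      { Name: "custom:team", Value: "" }, // blank value deletes the attribute from the user
    ],
  })
);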
Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", + "This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Servicesservice, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints", options: [ { name: "--user-pool-id", @@ -5505,7 +5533,7 @@ const completionSpec: Fig.Spec = { { name: "--read-attributes", description: - "The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. 
An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes", + "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes", args: { name: "list", }, diff --git a/src/aws/connect.ts b/src/aws/connect.ts index b6c311ed9f74..c3183cd32370 100644 --- a/src/aws/connect.ts +++ b/src/aws/connect.ts @@ -561,7 +561,7 @@ const completionSpec: Fig.Spec = { }, { name: "--key", - description: "A valid security key in PEM format", + description: "A valid security key in PEM format as a String", args: { name: "string", }, @@ -5734,7 +5734,7 @@ const completionSpec: Fig.Spec = { { name: "--metrics", description: - 'The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', + 'The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', args: { name: "list", }, diff --git a/src/aws/datazone.ts b/src/aws/datazone.ts index d1981ddb93b3..0dc5a3d33978 100644 --- a/src/aws/datazone.ts +++ b/src/aws/datazone.ts @@ -76,6 +76,13 @@ const completionSpec: Fig.Spec = { name: "accept-subscription-request", description: "Accepts a subscription request to a specific asset", options: [ + { + name: "--asset-scopes", + description: "The asset scopes of the accept subscription request", + args: { + name: "list", + }, + }, { name: "--decision-comment", description: @@ -119,6 +126,143 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "add-entity-owner", + description: "Adds the owner of an entity (a domain unit)", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that is provided to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain in which you want to add the entity owner", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: "The ID of the entity to which you want to add an owner", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: "The type of an entity", + args: { + name: "string", + }, + }, + { + name: "--owner", + description: "The owner that you want to add to the entity", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "add-policy-grant", + description: + "Adds a policy grant (an authorization policy) to a specified entity, including domain units, environment blueprint configurations, or environment profiles", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that is provided to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--detail", + description: "The details of the policy grant", + args: { + name: "structure", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to add a policy grant", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: + "The ID of the entity (resource) to which you want to add a policy grant", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: + "The type of entity (resource) to which the grant is added", + args: { + name: "string", + }, + }, + { + name: "--policy-type", + description: "The type of policy that you want to grant", + args: { + name: "string", + }, + }, + { + name: "--principal", + description: "The principal to whom the permissions are granted", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "associate-environment-role", description: "Associates the environment role in Amazon DataZone", @@ -924,6 +1068,66 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-domain-unit", + description: "Creates a domain unit in Amazon DataZone", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that is provided to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "The description of the domain unit", + args: { + name: "string", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to create a domain unit", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the domain unit", + args: { + name: "string", + }, + }, + { + name: "--parent-domain-unit-identifier", + description: "The ID of the parent domain unit", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-environment", description: "Create an Amazon DataZone environment", @@ -1524,6 +1728,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--domain-unit-id", + description: + "The ID of the domain unit. This parameter is not required and if it is not specified, then the project is created at the root domain unit level", + args: { + name: "string", + }, + }, { name: "--glossary-terms", description: @@ -2020,7 +2232,7 @@ const completionSpec: Fig.Spec = { }, { name: "delete-data-product", - description: "Deletes an data product in Amazon DataZone", + description: "Deletes a data product in Amazon DataZone", options: [ { name: "--domain-identifier", description: @@ -2161,6 +2373,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-domain-unit", + description: "Deletes a domain unit", + options: [ + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to delete a domain unit", + args: { + name: "string", + }, + }, + { + name: "--identifier", + description: "The ID of the domain unit that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-environment", description: "Deletes an environment in Amazon DataZone", @@ -3085,6 +3335,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-domain-unit", + description: "Gets the details of the specified domain unit", + options: [ + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to get a domain unit", + args: { + name: "string", + }, + }, + { + name: "--identifier", + description: "The identifier of the domain unit that you want to get", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-environment", description: "Gets an Amazon DataZone environment", @@ -4468,11 +4756,90 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-domains", - description: "Lists Amazon DataZone domains", + name: "list-domain-units-for-parent", + description: + "Lists child domain units for the specified parent domain unit", options: [ { - name: "--max-results", + name: "--domain-identifier", + description: + "The ID of the domain in which you want to list domain units for a parent domain unit", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of domain units to return in a single call to ListDomainUnitsForParent. When the number of domain units to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDomainUnitsForParent to list the next set of domain units", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "When the number of domain units is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of domain units, the response includes a pagination token named NextToken. 
You can specify this NextToken value in a subsequent call to ListDomainUnitsForParent to list the next set of domain units", + args: { + name: "string", + }, + }, + { + name: "--parent-domain-unit-identifier", + description: "The ID of the parent domain unit", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-domains", + description: "Lists Amazon DataZone domains", + options: [ + { + name: "--max-results", description: "The maximum number of domains to return in a single call to ListDomains. 
When the number of domains to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDomains to list the next set of domains", args: { @@ -4537,6 +4904,91 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-entity-owners", + description: "Lists the entity (domain units) owners", + options: [ + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to list entity owners", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: "The ID of the entity that you want to list", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: "The type of the entity that you want to list", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of entities to return in a single call to ListEntityOwners. When the number of entities to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEntityOwners to list the next set of entities", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "When the number of entities is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of entities, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEntityOwners to list the next set of entities", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-environment-actions", description: "Lists existing environment actions", @@ -5310,6 +5762,100 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-policy-grants", + description: "Lists policy grants", + options: [ + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to list policy grants", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: + "The ID of the entity for which you want to list policy grants", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: + "The type of entity for which you want to list policy grants", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of grants to return in a single call to ListPolicyGrants. When the number of grants to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListPolicyGrants to list the next set of grants", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "When the number of grants is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of grants, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListPolicyGrants to list the next set of grants", + args: { + name: "string", + }, + }, + { + name: "--policy-type", + description: "The type of policy that you want to list", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-project-memberships", description: "Lists all members of the specified project", @@ -6392,6 +6938,138 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "remove-entity-owner", + description: "Removes an owner from an entity", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that is provided to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to remove an owner from an entity", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: + "The ID of the entity from which you want to remove an owner", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: + "The type of the entity from which you want to remove an owner", + args: { + name: "string", + }, + }, + { + name: "--owner", + description: "The owner that you want to remove from an entity", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "remove-policy-grant", + description: "Removes a policy grant", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that is provided to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to remove a policy grant", + args: { + name: "string", + }, + }, + { + name: "--entity-identifier", + description: + "The ID of the entity from which you want to remove a policy grant", + args: { + name: "string", + }, + }, + { + name: "--entity-type", + description: + "The type of the entity from which you want to remove a policy grant", + args: { + name: "string", + }, + }, + { + name: "--policy-type", + description: "The type of the policy that you want to remove", + args: { + name: "string", + }, + }, + { + name: "--principal", + description: + "The principal from which you want to remove a policy grant", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "revoke-subscription", description: "Revokes a specified subscription in Amazon DataZone", @@ -7389,6 +8067,59 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-domain-unit", + description: "Updates the domain unit", + options: [ + { + name: "--description", + description: + "The description of the domain unit that you want to update", + args: { + name: "string", + }, + }, + { + name: "--domain-identifier", + description: + "The ID of the domain where you want to update a domain unit", + args: { + name: "string", + }, + }, + { + name: "--identifier", + description: "The ID of the domain unit that you want to update", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the domain unit that you want to update", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-environment", description: "Updates the specified environment in Amazon DataZone", @@ -7814,7 +8545,7 @@ const completionSpec: Fig.Spec = { { name: "--domain-identifier", description: - "The identifier of the Amazon DataZone domain in which a project is to be updated", + "The ID of the Amazon DataZone domain where a project is being updated", args: { name: "string", }, diff --git a/src/aws/devicefarm.ts b/src/aws/devicefarm.ts index 1817e60fc901..293aaabfb491 100644 --- a/src/aws/devicefarm.ts +++ b/src/aws/devicefarm.ts @@ -522,7 +522,7 @@ const completionSpec: Fig.Spec = { { name: "--type", description: - "The upload's upload type. Must be one of the following values: ANDROID_APP IOS_APP WEB_APP EXTERNAL_DATA APPIUM_JAVA_JUNIT_TEST_PACKAGE APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE CALABASH_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE UIAUTOMATION_TEST_PACKAGE UIAUTOMATOR_TEST_PACKAGE XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC INSTRUMENTATION_TEST_SPEC XCTEST_UI_TEST_SPEC If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error", + "The upload's upload type. Must be one of the following values: ANDROID_APP IOS_APP WEB_APP EXTERNAL_DATA APPIUM_JAVA_JUNIT_TEST_PACKAGE APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC INSTRUMENTATION_TEST_SPEC XCTEST_UI_TEST_SPEC If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error", args: { name: "string", }, @@ -1031,7 +1031,7 @@ const completionSpec: Fig.Spec = { { name: "--test-type", description: - "The test type for the specified device pool. Allowed values include the following: BUILTIN_FUZZ. BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time. APPIUM_JAVA_JUNIT. APPIUM_JAVA_TESTNG. APPIUM_PYTHON. APPIUM_NODE. APPIUM_RUBY. APPIUM_WEB_JAVA_JUNIT. APPIUM_WEB_JAVA_TESTNG. APPIUM_WEB_PYTHON. APPIUM_WEB_NODE. APPIUM_WEB_RUBY. CALABASH. INSTRUMENTATION. UIAUTOMATION. UIAUTOMATOR. XCTEST. XCTEST_UI", + "The test type for the specified device pool. Allowed values include the following: BUILTIN_FUZZ. 
APPIUM_JAVA_JUNIT. APPIUM_JAVA_TESTNG. APPIUM_PYTHON. APPIUM_NODE. APPIUM_RUBY. APPIUM_WEB_JAVA_JUNIT. APPIUM_WEB_JAVA_TESTNG. APPIUM_WEB_PYTHON. APPIUM_WEB_NODE. APPIUM_WEB_RUBY. INSTRUMENTATION. XCTEST. XCTEST_UI", args: { name: "string", }, @@ -2762,7 +2762,7 @@ const completionSpec: Fig.Spec = { { name: "--type", description: - "The type of upload. Must be one of the following values: ANDROID_APP IOS_APP WEB_APP EXTERNAL_DATA APPIUM_JAVA_JUNIT_TEST_PACKAGE APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE CALABASH_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE UIAUTOMATION_TEST_PACKAGE UIAUTOMATOR_TEST_PACKAGE XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC INSTRUMENTATION_TEST_SPEC XCTEST_UI_TEST_SPEC", + "The type of upload. Must be one of the following values: ANDROID_APP IOS_APP WEB_APP EXTERNAL_DATA APPIUM_JAVA_JUNIT_TEST_PACKAGE APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC INSTRUMENTATION_TEST_SPEC XCTEST_UI_TEST_SPEC", args: { name: "string", }, diff --git a/src/aws/dynamodb.ts b/src/aws/dynamodb.ts index 26a6f871853d..d786d79cb1b5 100644 --- a/src/aws/dynamodb.ts +++ b/src/aws/dynamodb.ts @@ -2618,7 +2618,7 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide", + "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account. TagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after a TagResource request, DynamoDB might return your previous tag set, if there was one, or an empty tag set. This is because ListTagsOfResource uses an eventually consistent query, and the metadata for your tags or table might not be available at that moment. Wait for a few seconds, and then try the ListTagsOfResource request again. The application or removal of tags using TagResource and UntagResource APIs is eventually consistent. ListTagsOfResource API will only reflect the changes after a few seconds. 
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide", options: [ { name: "--resource-arn", @@ -2754,7 +2754,7 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to five times per second, per account. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide", + "Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to five times per second, per account. UntagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after an UntagResource request, DynamoDB might return your previous tag set, if there was one, or an empty tag set. This is because ListTagsOfResource uses an eventually consistent query, and the metadata for your tags or table might not be available at that moment. Wait for a few seconds, and then try the ListTagsOfResource request again. The application or removal of tags using TagResource and UntagResource APIs is eventually consistent. ListTagsOfResource API will only reflect the changes after a few seconds. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide", options: [ { name: "--resource-arn", diff --git a/src/aws/ecs.ts b/src/aws/ecs.ts index 644feef640f0..4606650731e5 100644 --- a/src/aws/ecs.ts +++ b/src/aws/ecs.ts @@ -2653,7 +2653,7 @@ const completionSpec: Fig.Spec = { { name: "--name", description: - "The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. 
For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring", + "The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. 
containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. 
For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring", args: { name: "string", suggestions: accountSettingsResourceNames, diff --git a/src/aws/elbv2.ts b/src/aws/elbv2.ts index 0501ea293fe5..95e343cd9c1f 100644 --- a/src/aws/elbv2.ts +++ b/src/aws/elbv2.ts @@ -871,6 +871,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-listener-attributes", + description: "Describes the attributes for the specified listener", + options: [ + { + name: "--listener-arn", + description: "The Amazon Resource Name (ARN) of the listener", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-listener-certificates", description: @@ -1768,6 +1798,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "modify-listener-attributes", + description: + "Modifies the specified attributes of the specified listener", + options: [ + { + name: "--listener-arn", + description: "The Amazon Resource Name (ARN) of the listener", + args: { + name: "string", + }, + }, + { + name: "--attributes", + description: "The listener attributes", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "modify-load-balancer-attributes", description: @@ -1968,7 +2036,7 @@ const completionSpec: Fig.Spec = { }, { name: "--attributes", - description: "The attributes", + description: "The target group attributes", args: { name: "list", }, diff --git a/src/aws/finspace.ts b/src/aws/finspace.ts index 9a2634e74726..17199ca38575 100644 --- a/src/aws/finspace.ts +++ b/src/aws/finspace.ts @@ -601,7 +601,7 @@ const completionSpec: Fig.Spec = { { name: "--host-type", description: - "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. You can add one of the following values: kx.sg.4xlarge \u2013 The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge \u2013 The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge \u2013 The host type with a configuration of 432 GiB memory and 64 vCPUs. kx.sg.32xlarge \u2013 The host type with a configuration of 864 GiB memory and 128 vCPUs. kx.sg1.16xlarge \u2013 The host type with a configuration of 1949 GiB memory and 64 vCPUs. kx.sg1.24xlarge \u2013 The host type with a configuration of 2948 GiB memory and 96 vCPUs", + "The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. You can add one of the following values: kx.sg.large \u2013 The host type with a configuration of 16 GiB memory and 2 vCPUs. kx.sg.xlarge \u2013 The host type with a configuration of 32 GiB memory and 4 vCPUs. kx.sg.2xlarge \u2013 The host type with a configuration of 64 GiB memory and 8 vCPUs. kx.sg.4xlarge \u2013 The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge \u2013 The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge \u2013 The host type with a configuration of 432 GiB memory and 64 vCPUs. kx.sg.32xlarge \u2013 The host type with a configuration of 864 GiB memory and 128 vCPUs. kx.sg1.16xlarge \u2013 The host type with a configuration of 1949 GiB memory and 64 vCPUs. kx.sg1.24xlarge \u2013 The host type with a configuration of 2948 GiB memory and 96 vCPUs", args: { name: "string", }, diff --git a/src/aws/fis.ts b/src/aws/fis.ts index ed7bfc21e7df..d96c34e52353 100644 --- a/src/aws/fis.ts +++ b/src/aws/fis.ts @@ -352,6 +352,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-safety-lever", + description: "Gets information about the specified safety lever", + options: [ + { + name: "--id", + description: "The ID of the safety lever", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-target-account-configuration", description: @@ -984,6 +1014,43 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-safety-lever-state", + description: "Updates the specified safety lever state", + options: [ + { + name: "--id", + description: "The ID of the safety lever", + args: { + name: "string", + }, + }, + { + name: "--state", + description: "The state of the safety lever", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-target-account-configuration", description: diff --git a/src/aws/gamelift.ts b/src/aws/gamelift.ts index f42ee0e2fe5d..28acace4cde1 100644 --- a/src/aws/gamelift.ts +++ b/src/aws/gamelift.ts @@ -185,7 +185,7 @@ const completionSpec: Fig.Spec = { { name: "--operating-system", description: - "The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later. If you have active fleets using the Windows Server 2012 operating system, you can continue to create new builds using this OS until October 10, 2023, when Microsoft ends its support. All others must use Windows Server 2016 when creating new Windows-based builds", + "The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5", args: { name: "string", }, @@ -281,7 +281,7 @@ const completionSpec: Fig.Spec = { { name: "--operating-system", description: - "The platform that is used by containers in the container group definition. 
All containers in a group must run on the same operating system", + "The platform that is used by containers in the container group definition. All containers in a group must run on the same operating system. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5", args: { name: "string", }, @@ -464,7 +464,7 @@ const completionSpec: Fig.Spec = { { name: "--locations", description: - "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in Amazon Web Services Regions that support multiple locations. You can add any Amazon GameLift-supported Amazon Web Services Region as a remote location, in the form of an Amazon Web Services Region code, such as us-west-2 or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter. When using this parameter, Amazon GameLift requires you to include your home location in the request", + "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in Amazon Web Services Regions that support multiple locations. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see Amazon GameLift service locations for managed hosting", args: { name: "list", }, @@ -530,7 +530,7 @@ const completionSpec: Fig.Spec = { { name: "create-fleet-locations", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Adds remote locations to an EC2 or container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Multi-location fleets", + "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Adds remote locations to an EC2 or container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. 
To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting", options: [ { name: "--fleet-id", @@ -893,7 +893,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources are useful for resource management, access management and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Rareference", + "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources are useful for resource management, access management, and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Rareference", args: { name: "list", }, @@ -2335,7 +2335,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-location-attributes", description: - "Retrieves information on a fleet's remote locations, including life-cycle status and any suspended fleet activity. This operation can be used in the following ways: To get data for specific locations, provide a fleet identifier and a list of locations. Location data is returned in the order that it is requested. To get data for all locations, provide a fleet identifier only. Location data is returned in no particular order. When requesting attributes for multiple locations, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a LocationAttributes object is returned for each requested location. If the fleet does not have a requested location, no information is returned. This operation does not return the home Region. To get information on a fleet's home Region, call DescribeFleetAttributes. Learn more Setting up Amazon GameLift fleets", + "Retrieves information on a fleet's remote locations, including life-cycle status and any suspended fleet activity. This operation can be used in the following ways: To get data for specific locations, provide a fleet identifier and a list of locations. Location data is returned in the order that it is requested. To get data for all locations, provide a fleet identifier only. Location data is returned in no particular order. When requesting attributes for multiple locations, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a LocationAttributes object is returned for each requested location. If the fleet does not have a requested location, no information is returned. This operation does not return the home Region. To get information on a fleet's home Region, call DescribeFleetAttributes. Learn more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed hosting", options: [ { name: "--fleet-id", @@ -2391,7 +2391,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-location-capacity", description: - "Retrieves the resource capacity settings for a fleet location. 
The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. For a container fleet, this operation also returns counts for replica container groups. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If successful, a FleetCapacity object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets", + "Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. For a container fleet, this operation also returns counts for replica container groups. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If successful, a FleetCapacity object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed hosting GameLift metrics for fleets", options: [ { name: "--fleet-id", @@ -2431,7 +2431,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-location-utilization", description: - "Retrieves current usage data for a fleet location. Utilization data provides a snapshot of current game hosting activity at the requested location. Use this operation to retrieve utilization information for a fleet's remote location or home Region (you can also retrieve home Region utilization by calling DescribeFleetUtilization). To retrieve utilization data, identify a fleet and location. If successful, a FleetUtilization object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets", + "Retrieves current usage data for a fleet location. Utilization data provides a snapshot of current game hosting activity at the requested location. Use this operation to retrieve utilization information for a fleet's remote location or home Region (you can also retrieve home Region utilization by calling DescribeFleetUtilization). To retrieve utilization data, identify a fleet and location. If successful, a FleetUtilization object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed hosting GameLift metrics for fleets", options: [ { name: "--fleet-id", @@ -3620,7 +3620,7 @@ const completionSpec: Fig.Spec = { { name: "get-compute-access", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed fleet. This operation is not used with Amazon GameLift Anywhere fleets To request access, specify the compute name and the fleet ID. If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. EC2 fleets With an EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide. 
Container fleets With a container fleet (where compute type is CONTAINER), use these credentials and the target value with SSM to connect to the fleet instance where the container is running. After you're connected to the instance, use Docker commands to interact with the container. Learn more Remotely connect to fleet instances Debug fleet issues Remotely connect to a container fleet", + "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed fleet. This operation is not used with Amazon GameLift Anywhere fleets To request access, specify the compute name and the fleet ID. If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. EC2 fleets With an EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide. Container fleets With a container fleet (where compute type is CONTAINER), use these credentials and the target value with SSM to connect to the fleet instance where the container is running. After you're connected to the instance, use Docker commands to interact with the container. Learn more Remotely connect to fleet instances Debug fleet issues", options: [ { name: "--fleet-id", @@ -4611,7 +4611,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address", description: - "The IP address of the compute resource. Amazon GameLift requires either a DNS name or IP address", + "The IP address of the compute resource. Amazon GameLift requires either a DNS name or IP address. When registering an Anywhere fleet, an IP address is required", args: { name: "string", }, @@ -4619,7 +4619,7 @@ const completionSpec: Fig.Spec = { { name: "--location", description: - "The name of a custom location to associate with the compute resource being registered", + "The name of a custom location to associate with the compute resource being registered. This parameter is required when registering a compute for an Anywhere fleet", args: { name: "string", }, diff --git a/src/aws/glue.ts b/src/aws/glue.ts index f205fcabbe34..5479c43deb3c 100644 --- a/src/aws/glue.ts +++ b/src/aws/glue.ts @@ -1579,6 +1579,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--job-run-queuing-enabled", + description: + "Specifies whether job run queuing is enabled for the job runs for this job. A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing. If this field does not match the value set in the job run, then the value from the job run field will be used", + }, + { + name: "--no-job-run-queuing-enabled", + description: + "Specifies whether job run queuing is enabled for the job runs for this job. A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing. If this field does not match the value set in the job run, then the value from the job run field will be used", + }, { name: "--description", description: "Description of the job being defined", @@ -8644,6 +8654,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, { name: "--generate-cli-skeleton", description: @@ -9917,6 +9951,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--job-run-queuing-enabled", + description: + "Specifies whether job run queuing is enabled for the job run. A value of true means job run queuing is enabled for the job run. If false or not populated, the job run will not be considered for queueing", + }, + { + name: "--no-job-run-queuing-enabled", + description: + "Specifies whether job run queuing is enabled for the job run. A value of true means job run queuing is enabled for the job run. If false or not populated, the job run will not be considered for queueing", + }, { name: "--job-run-id", description: "The ID of a previous JobRun to retry", diff --git a/src/aws/guardduty.ts b/src/aws/guardduty.ts index 53e2bc6f8630..a0fca2a1d093 100644 --- a/src/aws/guardduty.ts +++ b/src/aws/guardduty.ts @@ -218,7 +218,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector belonging to the GuardDuty account that you want to create a filter for", + "The detector ID associated with the GuardDuty account for which you want to create a filter", args: { name: "string", }, @@ -304,7 +304,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account that you want to create an IPSet for", + "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet", args: { name: "string", }, @@ -389,7 +389,7 @@ const completionSpec: Fig.Spec = { { name: "--role", description: - "IAM role with permissions required to scan and add tags to the associated protected resource", + "Amazon Resource Name (ARN) of the IAM role that has the permissions to scan and add tags to the associated protected resource", args: { name: "string", }, @@ -444,7 +444,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account that you want to associate member accounts with", + "The unique ID of the detector of the GuardDuty account for which you want to associate member accounts", args: { name: "string", }, @@ -479,7 +479,7 @@ const completionSpec: Fig.Spec = { { name: "create-publishing-destination", description: - "Creates a publishing destination to export 
findings to. The resource to export findings to must exist before you use this operation", + "Creates a publishing destination where you can export your GuardDuty findings. Before you start exporting the findings, the destination resource must exist", options: [ { name: "--detector-id", @@ -538,7 +538,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The ID of the detector to create sample findings for", + description: + "The ID of the detector for which you need to create sample findings", args: { name: "string", }, @@ -577,7 +578,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account that you want to create a threatIntelSet for", + "The unique ID of the detector of the GuardDuty account for which you want to create a ThreatIntelSet", args: { name: "string", }, @@ -718,7 +719,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the filter is associated with", + "The unique ID of the detector that is associated with the filter", args: { name: "string", }, @@ -939,7 +940,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the threatIntelSet is associated with", + "The unique ID of the detector that is associated with the threatIntelSet", args: { name: "string", }, @@ -1067,7 +1068,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector to retrieve information about the delegated administrator from", + "The detector ID of the delegated administrator for which you need to retrieve the information", args: { name: "string", }, @@ -1353,8 +1354,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: - "The unique ID of the GuardDuty detector associated to the coverage statistics", + description: "The unique ID of the GuardDuty detector", args: { name: "string", }, @@ -1397,7 +1397,7 @@ const completionSpec: Fig.Spec = { { name: "get-detector", description: - "Retrieves an Amazon GuardDuty detector specified by the detectorId. There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints", + "Retrieves a GuardDuty detector specified by the detectorId. There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints", options: [ { name: "--detector-id", @@ -1433,7 +1433,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the filter is associated with", + "The unique ID of the detector that is associated with this filter", args: { name: "string", }, @@ -1513,12 +1513,12 @@ const completionSpec: Fig.Spec = { { name: "get-findings-statistics", description: - "Lists Amazon GuardDuty findings statistics for the specified detector ID. There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints", + "Lists GuardDuty findings statistics for the specified detector ID. You must provide either findingStatisticTypes or groupBy parameter, and not both. 
You can use the maxResults and orderBy parameters only when using groupBy. There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints", options: [ { name: "--detector-id", description: - "The ID of the detector that specifies the GuardDuty service whose findings' statistics you want to retrieve", + "The ID of the detector whose findings statistics you want to retrieve", args: { name: "string", }, @@ -1538,6 +1538,30 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--group-by", + description: + "Displays the findings statistics grouped by one of the listed valid values", + args: { + name: "string", + }, + }, + { + name: "--order-by", + description: + "Displays the sorted findings in the requested order. The default value of orderBy is DESC. You can use this parameter only with the groupBy parameter", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results to be returned in the response. The default value is 25. You can use this parameter only with the groupBy parameter", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -1564,7 +1588,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the IPSet is associated with", + "The unique ID of the detector that is associated with the IPSet", args: { name: "string", }, @@ -1659,7 +1683,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the scan setting is associated with", + "The unique ID of the detector that is associated with this scan", args: { name: "string", }, @@ -1729,7 +1753,7 @@ const completionSpec: Fig.Spec = { }, { name: "--account-ids", - description: "The account ID of the member account", + description: "A list of member account IDs", args: { name: "list", }, @@ -1865,7 +1889,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the threatIntelSet is associated with", + "The unique ID of the detector that is associated with the threatIntelSet", args: { name: "string", }, @@ -1975,7 +1999,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account that you want to invite members with", + "The unique ID of the detector of the GuardDuty account with which you want to invite members", args: { name: "string", }, @@ -2184,7 +2208,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the filter is associated with", + "The unique ID of the detector that is associated with the filter", args: { name: "string", }, @@ -2264,7 +2288,7 @@ const completionSpec: Fig.Spec = { { name: "--finding-criteria", description: - "Represents the criteria used for querying findings. 
Valid values include: JSON field name accountId region confidence id resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.resourceType service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.additionalInfo.threatListName service.archived When this attribute is set to 'true', only archived findings are listed. When it's set to 'false', only unarchived findings are listed. When this attribute is not set, all existing findings are listed. service.resourceRole severity type updatedAt Type: Timestamp in Unix Epoch millisecond format: 1486685375000", + "Represents the criteria used for querying findings. 
Valid values include: JSON field name accountId region confidence id resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.resourceType service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.additionalInfo.threatListName service.archived When this attribute is set to 'true', only archived findings are listed. When it's set to 'false', only unarchived findings are listed. When this attribute is not set, all existing findings are listed. 
service.ebsVolumeScanDetails.scanId service.resourceRole severity type updatedAt Type: Timestamp in Unix Epoch millisecond format: 1486685375000", args: { name: "structure", }, @@ -2343,7 +2367,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the IPSet is associated with", + "The unique ID of the detector that is associated with IPSet", args: { name: "string", }, @@ -2511,7 +2535,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector the member is associated with", + "The unique ID of the detector that is associated with the member", args: { name: "string", }, @@ -2655,7 +2679,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector to retrieve publishing destinations for", + "The detector ID for which you want to retrieve the publishing destination", args: { name: "string", }, @@ -2735,7 +2759,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the threatIntelSet is associated with", + "The unique ID of the detector that is associated with the threatIntelSet", args: { name: "string", }, @@ -3168,7 +3192,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector associated with the findings to update feedback for", + "The ID of the detector that is associated with the findings for which you want to update the feedback", args: { name: "string", }, @@ -3293,7 +3317,7 @@ const completionSpec: Fig.Spec = { { name: "--role", description: - "IAM role with permissions required to scan and add tags to the associated protected resource", + "Amazon Resource Name (ARN) of the IAM role with permissions to scan and add tags to the associated protected resource", args: { name: "string", }, diff --git a/src/aws/internetmonitor.ts b/src/aws/internetmonitor.ts index 5bc869b0be2b..94e098646702 100644 --- a/src/aws/internetmonitor.ts +++ b/src/aws/internetmonitor.ts @@ -671,7 +671,7 @@ const completionSpec: Fig.Spec = { { name: "--query-type", description: - "The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface: MEASUREMENTS: Provides availability score, performance score, total traffic, and round-trip times, at 5 minute intervals. TOP_LOCATIONS: Provides availability score, performance score, total traffic, and time to first byte (TTFB) information, for the top location and ASN combinations that you're monitoring, by traffic volume. TOP_LOCATION_DETAILS: Provides TTFB for Amazon CloudFront, your current configuration, and the best performing EC2 configuration, at 1 hour intervals. For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide", + "The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface: MEASUREMENTS: Provides availability score, performance score, total traffic, and round-trip times, at 5 minute intervals. TOP_LOCATIONS: Provides availability score, performance score, total traffic, and time to first byte (TTFB) information, for the top location and ASN combinations that you're monitoring, by traffic volume. 
TOP_LOCATION_DETAILS: Provides TTFB for Amazon CloudFront, your current configuration, and the best performing EC2 configuration, at 1 hour intervals. OVERALL_TRAFFIC_SUGGESTIONS: Provides TTFB, using a 30-day weighted average, for all traffic in each Amazon Web Services location that is monitored. OVERALL_TRAFFIC_SUGGESTIONS_DETAILS: Provides TTFB, using a 30-day weighted average, for each top location, for a proposed Amazon Web Services location. Must provide a Amazon Web Services location to search. For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide", args: { name: "string", }, diff --git a/src/aws/iot.ts b/src/aws/iot.ts index 1ba57a932bff..00872fb465eb 100644 --- a/src/aws/iot.ts +++ b/src/aws/iot.ts @@ -158,6 +158,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "associate-sbom-with-package-version", + description: + "Associates a software bill of materials (SBOM) with a specific software package version. Requires permission to access the AssociateSbomWithPackageVersion action", + options: [ + { + name: "--package-name", + description: "The name of the new software package", + args: { + name: "string", + }, + }, + { + name: "--version-name", + description: "The name of the new package version", + args: { + name: "string", + }, + }, + { + name: "--sbom", + description: + "The Amazon S3 location for the software bill of materials associated with a software package version", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "associate-targets-with-job", description: @@ -1976,6 +2030,22 @@ const completionSpec: Fig.Spec = { name: "map", }, }, + { + name: "--artifact", + description: + "The various build components created during the build process such as libraries and configuration files that make up a software package version", + args: { + name: "structure", + }, + }, + { + name: "--recipe", + description: + "The inline job document associated with a software package version used for a quick job deployment via IoT Jobs", + args: { + name: "string", + }, + }, { name: "--tags", description: @@ -4647,6 +4717,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--before-substitution", + description: + "A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values", + }, + { + name: "--no-before-substitution", + description: + "A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values", + }, { name: "--cli-input-json", description: @@ -5320,6 +5400,52 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "disassociate-sbom-from-package-version", + description: + "Disassociates a software bill of materials (SBOM) from a specific software package version. Requires permission to access the DisassociateSbomWithPackageVersion action", + options: [ + { + name: "--package-name", + description: "The name of the new software package", + args: { + name: "string", + }, + }, + { + name: "--version-name", + description: "The name of the new package version", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "enable-topic-rule", description: @@ -5616,6 +5742,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--before-substitution", + description: + "A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values", + }, + { + name: "--no-before-substitution", + description: + "A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values", + }, { name: "--cli-input-json", description: @@ -9053,6 +9189,90 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-sbom-validation-results", + description: + "The validation results for all software bill of materials (SBOM) attached to a specific software package version. Requires permission to access the ListSbomValidationResults action", + options: [ + { + name: "--package-name", + description: "The name of the new software package", + args: { + name: "string", + }, + }, + { + name: "--version-name", + description: "The name of the new package version", + args: { + name: "string", + }, + }, + { + name: "--validation-result", + description: "The end result of the", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of results to return at one time", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token that can be used to retrieve the next set of results, or null if there are no additional results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
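// Aside (not part of the patch): the --starting-token, --page-size, and --max-items options above
// repeat for every paginated subcommand in these specs. A minimal sketch, assuming the Fig.Option
// type these spec files already use, of how the trio could be written once and reused; the constant
// name is hypothetical and the descriptions are condensed from the surrounding text.
const paginationOptions: Fig.Option[] = [
  {
    name: "--starting-token",
    description:
      "A token to specify where to start paginating (the NextToken from a previously truncated response)",
    args: { name: "string" },
  },
  {
    name: "--page-size",
    description:
      "The size of each page to get in the AWS service call; a smaller page size means more calls but can help prevent timeouts",
    args: { name: "integer" },
  },
  {
    name: "--max-items",
    description:
      "The total number of items to return in the command's output; a NextToken is provided when more items are available",
    args: { name: "integer" },
  },
];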
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-scheduled-audits", description: @@ -12779,6 +12999,14 @@ const completionSpec: Fig.Spec = { name: "map", }, }, + { + name: "--artifact", + description: + "The various components that make up a software package version", + args: { + name: "structure", + }, + }, { name: "--action", description: @@ -12787,6 +13015,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--recipe", + description: + "The inline job document associated with a software package version used for a quick job deployment via IoT Jobs", + args: { + name: "string", + }, + }, { name: "--client-token", description: diff --git a/src/aws/iotsitewise.ts b/src/aws/iotsitewise.ts index 4af421ee850d..d56d96aedf28 100644 --- a/src/aws/iotsitewise.ts +++ b/src/aws/iotsitewise.ts @@ -530,6 +530,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--asset-model-type", + description: + "The type of asset model. ASSET_MODEL \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model. COMPONENT_MODEL \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model", + args: { + name: "string", + }, + }, + { + name: "--asset-model-id", + description: + "The ID to assign to the asset model, if desired. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique", + args: { + name: "string", + }, + }, + { + name: "--asset-model-external-id", + description: + "An external ID to assign to the asset model. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--asset-model-description", description: "A description for the asset model", @@ -577,30 +601,6 @@ const completionSpec: Fig.Spec = { name: "map", }, }, - { - name: "--asset-model-id", - description: - "The ID to assign to the asset model, if desired. IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique", - args: { - name: "string", - }, - }, - { - name: "--asset-model-external-id", - description: - "An external ID to assign to the asset model. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide", - args: { - name: "string", - }, - }, - { - name: "--asset-model-type", - description: - "The type of asset model. 
ASSET_MODEL \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model. COMPONENT_MODEL \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model", - args: { - name: "string", - }, - }, { name: "--cli-input-json", description: @@ -634,17 +634,17 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--parent-asset-model-composite-model-id", + name: "--asset-model-composite-model-external-id", description: - "The ID of the parent composite model in this asset model relationship", + "An external ID to assign to the composite model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of", args: { name: "string", }, }, { - name: "--asset-model-composite-model-external-id", + name: "--parent-asset-model-composite-model-id", description: - "An external ID to assign to the composite model. If the composite model is a derived composite model, or one nested inside a component model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying the derived ID of the model or property from the created model it's a part of", + "The ID of the parent composite model in this asset model relationship", args: { name: "string", }, @@ -703,6 +703,30 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--if-match", + description: + "The expected current entity tag (ETag) for the asset model\u2019s latest or active version (specified using matchForVersionType). The create request is rejected if the tag does not match the latest or active version's current entity tag. See Optimistic locking for asset model writes in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, + { + name: "--if-none-match", + description: + "Accepts * to reject the create request if an active version (specified using matchForVersionType as ACTIVE) already exists for the asset model", + args: { + name: "string", + }, + }, + { + name: "--match-for-version-type", + description: + "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag for the create operation", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1183,6 +1207,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--if-match", + description: + "The expected current entity tag (ETag) for the asset model\u2019s latest or active version (specified using matchForVersionType). The delete request is rejected if the tag does not match the latest or active version's current entity tag. 
See Optimistic locking for asset model writes in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, + { + name: "--if-none-match", + description: + "Accepts * to reject the delete request if an active version (specified using matchForVersionType as ACTIVE) already exists for the asset model", + args: { + name: "string", + }, + }, + { + name: "--match-for-version-type", + description: + "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag for the delete operation", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1229,6 +1277,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--if-match", + description: + "The expected current entity tag (ETag) for the asset model\u2019s latest or active version (specified using matchForVersionType). The delete request is rejected if the tag does not match the latest or active version's current entity tag. See Optimistic locking for asset model writes in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, + { + name: "--if-none-match", + description: + "Accepts * to reject the delete request if an active version (specified using matchForVersionType as ACTIVE) already exists for the asset model", + args: { + name: "string", + }, + }, + { + name: "--match-for-version-type", + description: + "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag for the delete operation", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1612,6 +1684,14 @@ const completionSpec: Fig.Spec = { description: "Whether or not to exclude asset model properties from the response", }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1652,6 +1732,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2907,6 +2995,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2987,6 +3083,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. 
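// Aside (not part of the patch): the new IoT SiteWise --if-match, --if-none-match, and
// --match-for-version-type options above all describe the same optimistic-locking pattern.
// A minimal sketch, assuming the Fig.Option type used throughout these specs, that surfaces the
// LATEST/ACTIVE values named in the descriptions as static suggestions; the suggestions are an
// illustrative idea, not something this patch itself adds.
const matchForVersionTypeOption: Fig.Option = {
  name: "--match-for-version-type",
  description:
    "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag",
  args: {
    name: "string",
    suggestions: ["LATEST", "ACTIVE"],
  },
};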
See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -3035,6 +3139,14 @@ const completionSpec: Fig.Spec = { description: "Retrieves a paginated list of summaries of all asset models", options: [ + { + name: "--asset-model-types", + description: + "The type of asset model. If you don't provide an assetModelTypes, all types of asset models are returned. ASSET_MODEL \u2013 An asset model that you can use to create assets. Can't be included as a component in another asset model. COMPONENT_MODEL \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model", + args: { + name: "list", + }, + }, { name: "--next-token", description: @@ -3052,11 +3164,11 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--asset-model-types", + name: "--asset-model-version", description: - "The type of asset model. ASSET_MODEL \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model. COMPONENT_MODEL \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model", + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. See Asset model versions in the IoT SiteWise User Guide", args: { - name: "list", + name: "string", }, }, { @@ -3345,7 +3457,7 @@ const completionSpec: Fig.Spec = { { name: "list-associated-assets", description: - "Retrieves a paginated list of associated assets. You can use this operation to do the following: List child assets associated to a parent asset by a hierarchy that you specify. List an asset's parent asset", + "Retrieves a paginated list of associated assets. You can use this operation to do the following: CHILD - List all child assets associated to the asset. PARENT - List the asset's parent asset", options: [ { name: "--asset-id", @@ -3358,7 +3470,7 @@ const completionSpec: Fig.Spec = { { name: "--hierarchy-id", description: - "The ID of the hierarchy by which child assets are associated to the asset. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This parameter is required if you choose CHILD for traversalDirection. For more information, see Asset hierarchies in the IoT SiteWise User Guide", + "(Optional) If you don't provide a hierarchyId, all the immediate assets in the traversalDirection will be returned. The ID of the hierarchy by which child assets are associated to the asset. (This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide.) For more information, see Asset hierarchies in the IoT SiteWise User Guide", args: { name: "string", }, @@ -3366,7 +3478,7 @@ const completionSpec: Fig.Spec = { { name: "--traversal-direction", description: - "The direction to list associated assets. Choose one of the following options: CHILD \u2013 The list includes all child assets associated to the asset. The hierarchyId parameter is required if you choose CHILD. 
PARENT \u2013 The list includes the asset's parent asset. Default: CHILD", + "The direction to list associated assets. Choose one of the following options: CHILD \u2013 The list includes all child assets associated to the asset. PARENT \u2013 The list includes the asset's parent asset. Default: CHILD", args: { name: "string", }, @@ -4384,6 +4496,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--asset-model-external-id", + description: + "An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--asset-model-name", description: "A unique name for the asset model", @@ -4431,9 +4551,25 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--asset-model-external-id", + name: "--if-match", description: - "An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web Services account. For more information, see Using external IDs in the IoT SiteWise User Guide", + "The expected current entity tag (ETag) for the asset model\u2019s latest or active version (specified using matchForVersionType). The update request is rejected if the tag does not match the latest or active version's current entity tag. See Optimistic locking for asset model writes in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, + { + name: "--if-none-match", + description: + "Accepts * to reject the update request if an active version (specified using matchForVersionType as ACTIVE) already exists for the asset model", + args: { + name: "string", + }, + }, + { + name: "--match-for-version-type", + description: + "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag for the update operation", args: { name: "string", }, @@ -4514,6 +4650,30 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--if-match", + description: + "The expected current entity tag (ETag) for the asset model\u2019s latest or active version (specified using matchForVersionType). The update request is rejected if the tag does not match the latest or active version's current entity tag. See Optimistic locking for asset model writes in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, + { + name: "--if-none-match", + description: + "Accepts * to reject the update request if an active version (specified using matchForVersionType as ACTIVE) already exists for the asset model", + args: { + name: "string", + }, + }, + { + name: "--match-for-version-type", + description: + "Specifies the asset model version type (LATEST or ACTIVE) used in conjunction with If-Match or If-None-Match headers to determine the target ETag for the update operation", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -4962,6 +5122,14 @@ const completionSpec: Fig.Spec = { description: "Whether or not to exclude asset model properties from the response", }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. 
See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -5004,6 +5172,14 @@ const completionSpec: Fig.Spec = { description: "Whether or not to exclude asset model properties from the response", }, + { + name: "--asset-model-version", + description: + "The version alias that specifies the latest or active version of the asset model. The details are returned in the response. The default value is LATEST. See Asset model versions in the IoT SiteWise User Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/ivs-realtime.ts b/src/aws/ivs-realtime.ts index 8807084bac32..1d48a260b665 100644 --- a/src/aws/ivs-realtime.ts +++ b/src/aws/ivs-realtime.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "ivs-realtime", description: - 'The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Key Concepts Stage \u2014 A virtual space where participants can exchange video in real time. Participant token \u2014 A token that authenticates a participant when they join a stage. Participant object \u2014 Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. For server-side composition: Composition process \u2014 Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process. Composition \u2014 Controls the look of the outputs, including how participants are positioned in the video. For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource', + 'The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Key Concepts Stage \u2014 A virtual space where participants can exchange video in real time. Participant token \u2014 A token that authenticates a participant when they join a stage. Participant object \u2014 Represents participants (people) in the stage and contains information about them. 
When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. For server-side composition: Composition process \u2014 Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process. Composition \u2014 Controls the look of the outputs, including how participants are positioned in the video. For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource', subcommands: [ { name: "create-encoder-configuration", @@ -25,7 +25,89 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "create-ingest-configuration", + description: + "Creates a new IngestConfiguration resource, used to specify the ingest protocol for a stage", + options: [ + { + name: "--name", + description: + "Optional name that can be specified for the IngestConfiguration being created", + args: { + name: "string", + }, + }, + { + name: "--stage-arn", + description: + "ARN of the stage with which the IngestConfiguration is associated", + args: { + name: "string", + }, + }, + { + name: "--user-id", + description: + "Customer-assigned name to help identify the participant using the IngestConfiguration; this can be used to link a participant to a user in the customer\u2019s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information", + args: { + name: "string", + }, + }, + { + name: "--attributes", + description: + "Application-provided attributes to store in the IngestConfiguration and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information", + args: { + name: "map", + }, + }, + { + name: "--ingest-protocol", + description: + "Type of ingest protocol that the user employs to broadcast. If this is set to RTMP, insecureIngest must be set to true", + args: { + name: "string", + }, + }, + { + name: "--insecure-ingest", + description: + "Whether the stage allows insecure RTMP ingest. This must be set to true, if ingestProtocol is set to RTMP. Default: false", + }, + { + name: "--no-insecure-ingest", + description: + "Whether the stage allows insecure RTMP ingest. This must be set to true, if ingestProtocol is set to RTMP. Default: false", + }, + { + name: "--tags", + description: + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -135,7 +217,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -191,7 +273,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). 
See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -246,6 +328,47 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-ingest-configuration", + description: + "Deletes a specified IngestConfiguration, so it can no longer be used to broadcast. An IngestConfiguration cannot be deleted if the publisher is actively streaming to a stage, unless force is set to true", + options: [ + { + name: "--arn", + description: "ARN of the IngestConfiguration", + args: { + name: "string", + }, + }, + { + name: "--force", + description: + "Optional field to force deletion of the IngestConfiguration. If this is set to true when a participant is actively publishing, the participant is disconnected from the stage, followed by deletion of the IngestConfiguration. Default: false", + }, + { + name: "--no-force", + description: + "Optional field to force deletion of the IngestConfiguration. If this is set to true when a participant is actively publishing, the participant is disconnected from the stage, followed by deletion of the IngestConfiguration. Default: false", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-public-key", description: @@ -280,7 +403,7 @@ const completionSpec: Fig.Spec = { { name: "delete-stage", description: - "Shuts down and deletes the specified stage (disconnecting all participants)", + "Shuts down and deletes the specified stage (disconnecting all participants). This operation also removes the stageArn from the associated IngestConfiguration, if there are participants using the IngestConfiguration to publish to the stage", options: [ { name: "--arn", @@ -342,7 +465,7 @@ const completionSpec: Fig.Spec = { { name: "disconnect-participant", description: - "Disconnects a specified participant and revokes the participant permanently from a specified stage", + "Disconnects a specified participant from a specified stage. 
If the participant is publishing using an IngestConfiguration, DisconnectParticipant also updates the stageArn in the IngestConfiguration to be an empty string", options: [ { name: "--stage-arn", @@ -354,7 +477,7 @@ const completionSpec: Fig.Spec = { { name: "--participant-id", description: - "Identifier of the participant to be disconnected. This is assigned by IVS and returned by CreateParticipantToken", + "Identifier of the participant to be disconnected. IVS assigns this; it is returned by CreateParticipantToken (for streams using WebRTC ingest) or CreateIngestConfiguration (for streams using RTMP ingest)", args: { name: "string", }, @@ -447,6 +570,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-ingest-configuration", + description: "Gets information about the specified IngestConfiguration", + options: [ + { + name: "--arn", + description: + "ARN of the ingest for which the information is to be retrieved", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-participant", description: "Gets information about the specified participant token", @@ -644,7 +798,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -762,6 +916,85 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-ingest-configurations", + description: + "Lists all IngestConfigurations in your account, in the AWS region where the API request is processed", + options: [ + { + name: "--filter-by-stage-arn", + description: + "Filters the response list to match the specified stage ARN. Only one filter (by stage ARN or by state) can be used at a time", + args: { + name: "string", + }, + }, + { + name: "--filter-by-state", + description: + "Filters the response list to match the specified state. Only one filter (by stage ARN or by state) can be used at a time", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "The first IngestConfiguration to retrieve. 
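// Aside (not part of the patch): the list-ingest-configurations descriptions above note that only
// one filter (by stage ARN or by state) can be used at a time. A minimal sketch, assuming
// Fig.Option's exclusiveOn field, of how that mutual exclusion could be expressed in a spec;
// this is an illustrative idea, not something the patch adds.
const filterByStageArnOption: Fig.Option = {
  name: "--filter-by-stage-arn",
  description: "Filters the response list to match the specified stage ARN",
  exclusiveOn: ["--filter-by-state"],
  args: { name: "string" },
};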
This is used for pagination; see the nextToken response field", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "Maximum number of results to return. Default: 50", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-participant-events", description: @@ -1130,7 +1363,7 @@ const completionSpec: Fig.Spec = { { name: "start-composition", description: - "Starts a Composition from a stage based on the configuration provided in the request. A Composition is an ephemeral resource that exists after this endpoint returns successfully. Composition stops and the resource is deleted: When StopComposition is called. After a 1-minute timeout, when all participants are disconnected from the stage. After a 1-minute timeout, if there are no participants in the stage when StartComposition is called. When broadcasting to the IVS channel fails and all retries are exhausted. When broadcasting is disconnected and all attempts to reconnect are exhausted", + "Starts a Composition from a stage based on the configuration provided in the request. A Composition is an ephemeral resource that exists after this operation returns successfully. Composition stops and the resource is deleted: When StopComposition is called. After a 1-minute timeout, when all participants are disconnected from the stage. 
After a 1-minute timeout, if there are no participants in the stage when StartComposition is called. When broadcasting to the IVS channel fails and all retries are exhausted. When broadcasting is disconnected and all attempts to reconnect are exhausted", options: [ { name: "--stage-arn", @@ -1163,7 +1396,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', + 'Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -1234,7 +1467,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints beyond what is documented there', + 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -1273,7 +1506,7 @@ const completionSpec: Fig.Spec = { { name: "--tag-keys", description: - 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints beyond what is documented there', + 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there', args: { name: "list", }, @@ -1297,6 +1530,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-ingest-configuration", + description: + "Updates a specified IngestConfiguration. Only the stage ARN attached to the IngestConfiguration can be updated. An IngestConfiguration that is active cannot be updated", + options: [ + { + name: "--arn", + description: + "ARN of the IngestConfiguration, for which the related stage ARN needs to be updated", + args: { + name: "string", + }, + }, + { + name: "--stage-arn", + description: "Stage ARN that needs to be updated", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-stage", description: "Updates a stage\u2019s configuration", diff --git a/src/aws/ivs.ts b/src/aws/ivs.ts index b9df6cf81189..5acb15b0364c 100644 --- a/src/aws/ivs.ts +++ b/src/aws/ivs.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "ivs", description: - 'Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel \u2014 Stores configuration data related to your live stream. You first create a channel and then use the channel\u2019s stream key to start your live stream. Stream key \u2014 An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair \u2014 Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration \u2014 Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy \u2014 Restricts playback by countries and/or origin sites. For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. 
At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', + 'Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Key Concepts Channel \u2014 Stores configuration data related to your live stream. You first create a channel and then use the channel\u2019s stream key to start your live stream. Stream key \u2014 An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair \u2014 Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. Recording configuration \u2014 Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. Playback restriction policy \u2014 Restricts playback by countries and/or origin sites. For more information about your IVS live stream, also see Getting Started with IVS Low-Latency Streaming. Tagging A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. 
For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. At most 50 tags can be applied to a resource. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS API requests. In addition, authorization is needed to view Amazon IVS private channels. (Private channels are channels that are enabled for "playback authorization.") Authentication All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', subcommands: [ { name: "batch-get-channel", @@ -144,7 +144,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -236,7 +236,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of 1-50 maps, each of the form string:string (key:value). 
See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -284,7 +284,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -347,7 +347,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -770,7 +770,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Any tags provided with the request are added to the playback key pair tags. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Any tags provided with the request are added to the playback key pair tags. See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -1401,7 +1401,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). 
See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "map", }, @@ -1440,7 +1440,7 @@ const completionSpec: Fig.Spec = { { name: "--tag-keys", description: - 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { name: "list", }, diff --git a/src/aws/ivschat.ts b/src/aws/ivschat.ts index 782b9ad46c34..b595021e7ba1 100644 --- a/src/aws/ivschat.ts +++ b/src/aws/ivschat.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "ivschat", description: - 'Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Resources The following resources are part of Amazon IVS Chat: LoggingConfiguration \u2014 A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information. Room \u2014 The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. 
API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken endpoint through the AWS SDK. You call CreateChatToken for every user\u2019s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', + 'Introduction The Amazon IVS Chat control-plane API enables you to create and manage Amazon IVS Chat resources. You also need to integrate with the Amazon IVS Chat Messaging API, to enable users to interact with chat rooms in real time. The API is an AWS regional service. For a list of supported regions and Amazon IVS Chat HTTPS service endpoints, see the Amazon IVS Chat information on the Amazon IVS page in the AWS General Reference. This document describes HTTP operations. There is a separate messaging API for managing Chat resources; see the Amazon IVS Chat Messaging API Reference. Notes on terminology: You create service applications using the Amazon IVS Chat API. We refer to these as applications. You create front-end client applications (browser and Android/iOS apps) using the Amazon IVS Chat Messaging API. We refer to these as clients. Resources The following resources are part of Amazon IVS Chat: LoggingConfiguration \u2014 A configuration that allows customers to store and record sent messages in a chat room. See the Logging Configuration endpoints for more information. Room \u2014 The central Amazon IVS Chat resource through which clients connect to and exchange chat messages. See the Room endpoints for more information. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. 
See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS Chat API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Room. At most 50 tags can be applied to a resource. API Access Security Your Amazon IVS Chat applications (service applications and clients) must be authenticated and authorized to access Amazon IVS Chat resources. Note the differences between these concepts: Authentication is about verifying identity. Requests to the Amazon IVS Chat API must be signed to verify your identity. Authorization is about granting permissions. Your IAM roles need to have permissions for Amazon IVS Chat API requests. Users (viewers) connect to a room using secure access tokens that you create using the CreateChatToken operation through the AWS SDK. You call CreateChatToken for every user\u2019s chat session, passing identity and authorization information about the user. Signing API Requests HTTP API requests must be signed with an AWS SigV4 signature using your AWS security credentials. The AWS Command Line Interface (CLI) and the AWS SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS Chat HTTP API directly, it\u2019s your responsibility to sign the requests. You generate a signature using valid AWS credentials for an IAM role that has permission to perform the requested action. For example, DeleteMessage requests must be made using an IAM role that has the ivschat:DeleteMessage permission. For more information: Authentication and generating signatures \u2014 See Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon Web Services General Reference. Managing Amazon IVS permissions \u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Amazon Resource Names (ARNs) ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference', subcommands: [ { name: "create-chat-token", @@ -91,7 +91,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints on tags beyond what is documented there', + 'Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints on tags beyond what is documented there', args: { name: "map", }, @@ -154,7 +154,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Tags to attach to the resource. Array of maps, each of the form string:string (key:value). 
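As a sketch of the CreateChatToken flow described above, one token per viewer session carrying identity and capabilities, assuming the @aws-sdk/client-ivschat v3 exports shown; the room ARN and user ID are placeholders:

```ts
import { IvschatClient, CreateChatTokenCommand } from "@aws-sdk/client-ivschat";

const chat = new IvschatClient({ region: "us-west-2" });

// Issue a short-lived token that the browser/mobile client uses to join the room.
const { token, sessionExpirationTime } = await chat.send(
  new CreateChatTokenCommand({
    roomIdentifier: "arn:aws:ivschat:us-west-2:123456789012:room/AbCdEfGh1234", // placeholder
    userId: "viewer-42",
    capabilities: ["SEND_MESSAGE"],
    sessionDurationInMinutes: 60,
  })
);
console.log(token, sessionExpirationTime);
```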
See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', + 'Tags to attach to the resource. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', args: { name: "map", }, @@ -599,7 +599,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', + 'Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', args: { name: "map", }, @@ -638,7 +638,7 @@ const completionSpec: Fig.Spec = { { name: "--tag-keys", description: - 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', + 'Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS Chat has no constraints beyond what is documented there', args: { name: "list", }, diff --git a/src/aws/kafka.ts b/src/aws/kafka.ts index 3eadbfc6f1aa..4b27259cd317 100644 --- a/src/aws/kafka.ts +++ b/src/aws/kafka.ts @@ -860,7 +860,7 @@ const completionSpec: Fig.Spec = { { name: "get-bootstrap-brokers", description: - "A list of brokers that a client application can use to bootstrap", + "A list of brokers that a client application can use to bootstrap. This list doesn't necessarily include all of the brokers in the cluster. The following Python 3.6 example shows how you can use the Amazon Resource Name (ARN) of a cluster to get its bootstrap brokers. If you don't know the ARN of your cluster, you can use the ListClusters operation to get the ARNs of all the clusters in this account and Region", options: [ { name: "--cluster-arn", diff --git a/src/aws/lambda.ts b/src/aws/lambda.ts index fc95c023a0d5..7fc595b73116 100644 --- a/src/aws/lambda.ts +++ b/src/aws/lambda.ts @@ -1016,7 +1016,7 @@ const completionSpec: Fig.Spec = { { name: "add-permission", description: - "Grants an Amazon Web Service, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. 
To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda", + "Grants an Amazon Web Servicesservice, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Servicesservices, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Servicesservices, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda", options: [ { name: "--function-name", @@ -1047,7 +1047,7 @@ const completionSpec: Fig.Spec = { { name: "--principal", description: - "The Amazon Web Service or Amazon Web Services account that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service", + "The Amazon Web Servicesservice or Amazon Web Services account that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service", args: { name: "string", generators: [generators.getPrincipal, generators.awsPrincipals], @@ -1056,7 +1056,7 @@ const completionSpec: Fig.Spec = { { name: "--source-arn", description: - "For Amazon Web Services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator", + "For Amazon Web Servicesservices, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator", args: { name: "string", }, @@ -1064,7 +1064,7 @@ const completionSpec: Fig.Spec = { { name: "--source-account", description: - "For Amazon Web Service, the ID of the Amazon Web Services account that owns the resource. 
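To make the Principal / SourceArn / SourceAccount parameters above concrete, a minimal AddPermission sketch with @aws-sdk/client-lambda; the function name, bucket ARN, and account ID are placeholders:

```ts
import { LambdaClient, AddPermissionCommand } from "@aws-sdk/client-lambda";

const lambda = new LambdaClient({});

// Allow Amazon S3 to invoke the function, but only for events coming from
// one bucket owned by one account (guards against bucket re-creation elsewhere).
await lambda.send(
  new AddPermissionCommand({
    FunctionName: "my-function",                    // placeholder
    StatementId: "allow-s3-invoke",
    Action: "lambda:InvokeFunction",
    Principal: "s3.amazonaws.com",
    SourceArn: "arn:aws:s3:::amzn-s3-demo-bucket",  // placeholder
    SourceAccount: "123456789012",                  // placeholder
  })
);
```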
Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account", + "For Amazon Web Servicesservice, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account", args: { name: "string", generators: generators.getPrincipal, @@ -1445,6 +1445,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--kms-key-arn", + description: + "The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1468,7 +1476,7 @@ const completionSpec: Fig.Spec = { { name: "create-function", description: - "Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. 
To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions", + "Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Servicesservices, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. 
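A minimal sketch of the .zip deployment path described above, using @aws-sdk/client-lambda; the role ARN, runtime, and archive name are placeholders:

```ts
import { readFile } from "node:fs/promises";
import { LambdaClient, CreateFunctionCommand } from "@aws-sdk/client-lambda";

const lambda = new LambdaClient({});

await lambda.send(
  new CreateFunctionCommand({
    FunctionName: "my-function",                                  // placeholder
    Role: "arn:aws:iam::123456789012:role/lambda-execution-role", // placeholder
    PackageType: "Zip",
    Runtime: "nodejs20.x",
    Handler: "index.handler",
    Code: { ZipFile: await readFile("function.zip") },
    Architectures: ["arm64"],
    Publish: true, // snapshot the initial configuration as version 1
  })
);
```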
When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Servicesservice invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Servicesservices, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions", options: [ { name: "--function-name", @@ -1872,7 +1880,7 @@ const completionSpec: Fig.Spec = { { name: "delete-function", description: - "Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and resources that invoke your function directly, delete the trigger in the service where you originally configured it", + "Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Servicesservices and resources that invoke your function directly, delete the trigger in the service where you originally configured it", options: [ { name: "--function-name", @@ -2142,6 +2150,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-resource-policy", + description: "Deletes a resource-based policy from a function", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the function you want to delete the policy from. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted", + args: { + name: "string", + }, + }, + { + name: "--revision-id", + description: + "Delete the existing policy only if its revision ID matches the string you specify. To find the revision ID of the policy currently attached to your function, use the GetResourcePolicy action", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-account-settings", description: @@ -2739,6 +2786,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-public-access-block-config", + description: "Retrieve the public-access settings for a function", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the function you want to retrieve public-access settings for", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-resource-policy", + description: "Retrieves the resource-based policy attached to a function", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the function you want to retrieve the policy for. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-runtime-management-config", description: @@ -4074,6 +4183,94 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "put-public-access-block-config", + description: + "Configure your function's public-access settings. To control public access to a Lambda function, you can choose whether to allow the creation of resource-based policies that allow public access to that function. 
You can also block public access to a function, even if it has an existing resource-based policy that allows it", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the function you want to configure public-access settings for. Public-access settings are applied at the function level, so you can't apply different settings to function versions or aliases", + args: { + name: "string", + }, + }, + { + name: "--public-access-block-config", + description: + "An object defining the public-access settings you want to apply. To block the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy to true. To allow the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy to false. To block public access to your function, even if its resource-based policy allows it, set RestrictPublicResource to true. To allow public access to a function with a resource-based policy that permits it, set RestrictPublicResource to false. The default setting for both BlockPublicPolicy and RestrictPublicResource is true", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "put-resource-policy", + description: + "Adds a resource-based policy to a function. You can use resource-based policies to grant access to other Amazon Web Services accounts, organizations, or services. Resource-based policies apply to a single function, version, or alias. Adding a resource-based policy using this API action replaces any existing policy you've previously created. This means that if you've previously added resource-based permissions to a function using the AddPermission action, those permissions will be overwritten by your new policy", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the function you want to add the policy to. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted", + args: { + name: "string", + }, + }, + { + name: "--policy", + description: + "The JSON resource-based policy you want to add to your function. To learn more about creating resource-based policies for controlling access to Lambda, see Working with resource-based IAM policies in Lambda in the Lambda Developer Guide", + args: { + name: "string", + }, + }, + { + name: "--revision-id", + description: + "Replace the existing policy only if its revision ID matches the string you specify. 
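Assuming a @aws-sdk/client-lambda release recent enough to include these operations, a hedged sketch of the public-access block and resource-based policy calls described above; ARNs and account IDs are placeholders:

```ts
import {
  LambdaClient,
  PutPublicAccessBlockConfigCommand,
  PutResourcePolicyCommand,
} from "@aws-sdk/client-lambda";

const lambda = new LambdaClient({});
const functionArn =
  "arn:aws:lambda:us-east-1:123456789012:function:my-function"; // placeholder

// Keep both public-access controls on (mirrors the documented defaults).
await lambda.send(
  new PutPublicAccessBlockConfigCommand({
    ResourceArn: functionArn,
    PublicAccessBlockConfig: { BlockPublicPolicy: true, RestrictPublicResource: true },
  })
);

// Replace the function's resource-based policy; this overwrites statements
// previously added with AddPermission.
await lambda.send(
  new PutResourcePolicyCommand({
    ResourceArn: functionArn,
    Policy: JSON.stringify({
      Version: "2012-10-17",
      Statement: [
        {
          Sid: "cross-account-invoke",
          Effect: "Allow",
          Principal: { AWS: "arn:aws:iam::210987654321:root" }, // placeholder account
          Action: "lambda:InvokeFunction",
          Resource: functionArn,
        },
      ],
    }),
  })
);
```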
To find the revision ID of the policy currently attached to your function, use the GetResourcePolicy action", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-runtime-management-config", description: @@ -4192,7 +4389,7 @@ const completionSpec: Fig.Spec = { { name: "remove-permission", description: - "Revokes function-use permission from an Amazon Web Service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy", + "Revokes function-use permission from an Amazon Web Servicesservice or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy", options: [ { name: "--function-name", @@ -4593,6 +4790,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--kms-key-arn", + description: + "The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -4731,7 +4936,7 @@ const completionSpec: Fig.Spec = { { name: "update-function-configuration", description: - "Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Service, use AddPermission", + "Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. 
The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Servicesservice, use AddPermission", options: [ { name: "--function-name", diff --git a/src/aws/logs.ts b/src/aws/logs.ts index 8d358dc99fe7..6492cc2d2e4b 100644 --- a/src/aws/logs.ts +++ b/src/aws/logs.ts @@ -103,6 +103,30 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--record-fields", + description: + "The list of record fields to be delivered to the destination, in order. If the delivery\u2019s log source has mandatory fields, they must be included in this list", + args: { + name: "list", + }, + }, + { + name: "--field-delimiter", + description: + "The field delimiter to use between record fields when the final output format of a delivery is in Plain, W3C, or Raw format", + args: { + name: "string", + }, + }, + { + name: "--s3-delivery-configuration", + description: + "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket", + args: { + name: "structure", + }, + }, { name: "--tags", description: @@ -926,6 +950,102 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-configuration-templates", + description: + "Use this operation to return the valid and default values that are used when creating delivery sources, delivery destinations, and deliveries. For more information about deliveries, see CreateDelivery", + options: [ + { + name: "--service", + description: + "Use this parameter to filter the response to include only the configuration templates that apply to the Amazon Web Services service that you specify here", + args: { + name: "string", + }, + }, + { + name: "--log-types", + description: + "Use this parameter to filter the response to include only the configuration templates that apply to the log types that you specify here", + args: { + name: "list", + }, + }, + { + name: "--resource-types", + description: + "Use this parameter to filter the response to include only the configuration templates that apply to the resource types that you specify here", + args: { + name: "list", + }, + }, + { + name: "--delivery-destination-types", + description: + "Use this parameter to filter the response to include only the configuration templates that apply to the delivery destination types that you specify here", + args: { + name: "list", + }, + }, + { + name: "--next-token", + description: + "The token for the next set of items to return. The token expires after 24 hours", + args: { + name: "string", + }, + }, + { + name: "--limit", + description: + "Use this parameter to limit the number of configuration templates that are returned in the response", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
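A hedged sketch of a CreateDelivery call using the new record-fields, field-delimiter, and s3-delivery-configuration parameters described above, assuming the corresponding camelCase inputs in @aws-sdk/client-cloudwatch-logs; the source and destination names and the suffix-path field are placeholders to check against the installed typings:

```ts
import {
  CloudWatchLogsClient,
  CreateDeliveryCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const logs = new CloudWatchLogsClient({ region: "us-east-1" });

await logs.send(
  new CreateDeliveryCommand({
    deliverySourceName: "my-delivery-source", // placeholder
    deliveryDestinationArn:
      "arn:aws:logs:us-east-1:123456789012:delivery-destination:my-s3-destination", // placeholder
    // Must include any mandatory fields of the delivery source, in order.
    recordFields: ["event_timestamp", "session_id"],
    fieldDelimiter: ",", // only meaningful for Plain, W3C, or Raw output
    // Only valid when the destination is an S3 bucket (field name assumed).
    s3DeliveryConfiguration: { suffixPath: "{account-id}/{region}" },
  })
);
```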
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-deliveries", description: @@ -1911,7 +2031,7 @@ const completionSpec: Fig.Spec = { { name: "--log-stream-names", description: - "Filters the results to only logs from the log streams in this list. If you specify a value for both logStreamNamePrefix and logStreamNames, the action returns an InvalidParameterException error", + "Filters the results to only logs from the log streams in this list. If you specify a value for both logStreamNames and logStreamNamePrefix, the action returns an InvalidParameterException error", args: { name: "list", }, @@ -1919,7 +2039,7 @@ const completionSpec: Fig.Spec = { { name: "--log-stream-name-prefix", description: - "Filters the results to include only events from log streams that have names starting with this prefix. If you specify a value for both logStreamNamePrefix and logStreamNames, but the value for logStreamNamePrefix does not match any log stream names specified in logStreamNames, the action returns an InvalidParameterException error", + "Filters the results to include only events from log streams that have names starting with this prefix. If you specify a value for both logStreamNamePrefix and logStreamNames, the action returns an InvalidParameterException error", args: { name: "string", }, @@ -2655,7 +2775,7 @@ const completionSpec: Fig.Spec = { { name: "put-account-policy", description: - "Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. 
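Relating to the filter-log-events parameters above, a minimal sketch that uses only logStreamNamePrefix, since supplying it together with logStreamNames returns an InvalidParameterException; the log group name is a placeholder:

```ts
import {
  CloudWatchLogsClient,
  FilterLogEventsCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const logs = new CloudWatchLogsClient({});

// Use logStreamNamePrefix OR logStreamNames, never both.
const { events } = await logs.send(
  new FilterLogEventsCommand({
    logGroupName: "/aws/lambda/my-function", // placeholder
    logStreamNamePrefix: "2024/09/",
    filterPattern: "ERROR",
  })
);
console.log(events?.length ?? 0, "matching events");
```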
Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission", + "Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. 
Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission", options: [ { name: "--policy-name", @@ -2668,7 +2788,7 @@ const completionSpec: Fig.Spec = { { name: "--policy-document", description: - 'Specify the policy, in JSON. 
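A sketch of the account-level subscription filter policy described above, expressed as the JSON attributes the description lists and applied with PutAccountPolicy; the Kinesis stream and role ARNs are placeholders, and the policyType value assumes the current @aws-sdk/client-cloudwatch-logs enum:

```ts
import {
  CloudWatchLogsClient,
  PutAccountPolicyCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const logs = new CloudWatchLogsClient({});

await logs.send(
  new PutAccountPolicyCommand({
    policyName: "account-subscription-filter",
    policyType: "SUBSCRIPTION_FILTER_POLICY",
    policyDocument: JSON.stringify({
      DestinationArn: "arn:aws:kinesis:us-east-1:123456789012:stream/log-feed", // placeholder
      RoleArn: "arn:aws:iam::123456789012:role/cwl-to-kinesis",                 // placeholder
      FilterPattern: "",      // empty pattern forwards every ingested event
      Distribution: "Random", // only applies to Kinesis Data Streams destinations
    }),
  })
);
```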
Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation\'s policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don\'t need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. DistributionThe method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream', + 'Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. 
If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation\'s policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don\'t need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream', args: { name: "string", }, @@ -2873,7 +2993,7 @@ const completionSpec: Fig.Spec = { { name: "--log-type", description: - "Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Centerr, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS", + "Defines the type of log that the source is sending. For Amazon Bedrock, the valid value is APPLICATION_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS", args: { name: "string", }, @@ -3043,6 +3163,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--entity", + description: "Reserved for internal use", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -3065,7 +3192,7 @@ const completionSpec: Fig.Spec = { { name: "put-metric-filter", description: - "Creates or updates a metric filter and associates it with the specified log group. 
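For reference, the two-block policyDocument described above tends to look like the following sketch. The block contents (the DataIdentifier array, the Audit operation with a FindingsDestination, and the Deidentify operation with an empty MaskConfig) come straight from the description; the Statement wrapper, the Version string, and the data-identifier ARN format are assumptions to check against the CloudWatch Logs data protection documentation:

```ts
// Sketch of a data protection policy for --policy-document (hypothetical values).
const dataProtectionPolicy = {
  Name: "data-protection-policy",      // optional; reported as a CloudWatch metrics dimension
  Description: "Mask email addresses", // optional
  Version: "2021-06-01",               // assumption: current policy language version
  Statement: [
    {
      // First block: find the sensitive terms.
      DataIdentifier: ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      Operation: { Audit: { FindingsDestination: {} } },
    },
    {
      // Second block: mask them. The DataIdentifier arrays must match exactly.
      DataIdentifier: ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      Operation: { Deidentify: { MaskConfig: {} } },
    },
  ],
};

// Passed to the operation as a JSON string (limit: 30,720 characters).
const policyDocument = JSON.stringify(dataProtectionPolicy);
```

A subscription filter policy for the same option instead carries the DestinationArn, RoleArn, FilterPattern, and Distribution attributes listed above.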
With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges",
+        "Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges",
       options: [
         {
           name: "--log-group-name",
@@ -3261,7 +3388,7 @@ const completionSpec: Fig.Spec = {
     {
       name: "put-subscription-filter",
       description:
-        "Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. 
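To make the regex quota above concrete, here is a rough put-metric-filter invocation whose filter pattern uses a single %...% regular expression term, well inside the two-per-pattern quota. Only --log-group-name appears in this spec excerpt; the other flag names, the space-delimited pattern syntax, and the shorthand for the metric transformation are assumptions to verify against the CLI reference:

```ts
// Sketch: shell out to the AWS CLI with one regex term in the filter pattern.
import { execFileSync } from "node:child_process";

execFileSync(
  "aws",
  [
    "logs", "put-metric-filter",
    "--log-group-name", "/example/app",
    "--filter-name", "Http5xx",
    // One %regex% term; the quota described above allows at most two per pattern.
    "--filter-pattern", "[ip, id, user, ts, req, status = %5[0-9]{2}%, size]",
    "--metric-transformations",
    "metricName=Http5xxCount,metricNamespace=Example/App,metricValue=1",
  ],
  { stdio: "inherit" },
);
```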
An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission",
+        "Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. A Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. Using regular expressions to create subscription filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission",
       options: [
         {
           name: "--log-group-name",
@@ -3741,6 +3868,61 @@ const completionSpec: Fig.Spec = {
         },
       ],
     },
+    {
+      name: "update-delivery-configuration",
+      description:
+        "Use this operation to update the configuration of a delivery to change either the S3 path pattern or the format of the delivered logs. You can't use this operation to change the source or destination of the delivery",
+      options: [
+        {
+          name: "--id",
+          description: "The ID of the delivery to be updated by this request",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--record-fields",
+          description:
+            "The list of record fields to be delivered to the destination, in order. If the delivery\u2019s log source has mandatory fields, they must be included in this list",
+          args: {
+            name: "list",
+          },
+        },
+        {
+          name: "--field-delimiter",
+          description:
+            "The field delimiter to use between record fields when the final output format of a delivery is in Plain, W3C, or Raw format",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--s3-delivery-configuration",
+          description:
+            "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket",
+          args: {
+            name: "structure",
+          },
+        },
+        {
+          name: "--cli-input-json",
+          description:
+            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
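A rough put-subscription-filter call matching the description above; the same two-regex quota applies to its filter pattern. Only --log-group-name is shown in this excerpt, so treat the remaining flag names and both ARNs as illustrative assumptions:

```ts
// Sketch: subscribe a log group to a same-account Kinesis Data Streams destination.
import { execFileSync } from "node:child_process";

execFileSync(
  "aws",
  [
    "logs", "put-subscription-filter",
    "--log-group-name", "/example/app",
    "--filter-name", "ship-errors",
    "--filter-pattern", "ERROR",
    "--destination-arn", "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
    // Role that lets CloudWatch Logs write to the stream; not needed for
    // cross-account logical destinations, per the description above.
    "--role-arn", "arn:aws:iam::111122223333:role/CWLtoKinesisRole",
  ],
  { stdio: "inherit" },
);
```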
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-log-anomaly-detector", description: "Updates an existing log anomaly detector", diff --git a/src/aws/mediaconnect.ts b/src/aws/mediaconnect.ts index 7bcd08dab89c..c4923d4802fc 100644 --- a/src/aws/mediaconnect.ts +++ b/src/aws/mediaconnect.ts @@ -377,6 +377,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--source-monitoring-config", + description: "The settings for source monitoring", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -670,6 +677,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-flow-source-thumbnail", + description: "Displays the thumbnail details of a flow's source stream", + options: [ + { + name: "--flow-arn", + description: "The Amazon Resource Name (ARN) of the flow", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-gateway", description: @@ -1981,6 +2018,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--source-monitoring-config", + description: "The settings for source monitoring", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/mediaconvert.ts b/src/aws/mediaconvert.ts index 0ca7cb628ea5..02d0e698b243 100644 --- a/src/aws/mediaconvert.ts +++ b/src/aws/mediaconvert.ts @@ -102,6 +102,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--job-engine-version", + description: + "Use Job engine versions to run jobs for your production workflow on one version, while you test and validate the latest version. To specify a Job engine version: Enter a date in a YYYY-MM-DD format. For a list of valid Job engine versions, submit a ListVersions request. 
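As a sketch of the new update-delivery-configuration operation documented above, using only the flags shown in this spec; the delivery ID and the record-field names are placeholders, since the valid fields depend on the delivery's log source:

```ts
// Sketch: reorder the delivered record fields and switch to a comma delimiter.
import { execFileSync } from "node:child_process";

execFileSync(
  "aws",
  [
    "logs", "update-delivery-configuration",
    "--id", "EXAMPLE-DELIVERY-ID",
    // Mandatory fields for the source must stay in this list, per the description.
    "--record-fields", "event_timestamp", "session_id", "event_type",
    "--field-delimiter", ",",
  ],
  { stdio: "inherit" },
);
```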
To not specify a Job engine version: Leave blank", + args: { + name: "string", + }, + }, { name: "--job-template", description: @@ -1162,6 +1170,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-versions", + description: + "Retrieve a JSON array of all available Job engine versions and the date they expire", + options: [ + { + name: "--max-results", + description: + "Optional. Number of valid Job engine versions, up to twenty, that will be returned at one time", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "Optional. Use this string, provided with the response to a previous request, to request the next batch of Job engine versions", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
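To pin a MediaConvert job to a specific engine version as described above, you would list the available versions and then pass one of the returned YYYY-MM-DD dates to create-job. The date, role ARN, and settings file below are placeholders, and --role/--settings are standard create-job flags not shown in this excerpt:

```ts
// Sketch: discover Job engine versions, then pin create-job to one of them.
import { execFileSync } from "node:child_process";

// Returns a JSON array of available versions and the date each expires.
execFileSync("aws", ["mediaconvert", "list-versions"], { stdio: "inherit" });

execFileSync(
  "aws",
  [
    "mediaconvert", "create-job",
    "--role", "arn:aws:iam::111122223333:role/MediaConvertRole", // placeholder
    "--settings", "file://job-settings.json",                    // placeholder
    "--job-engine-version", "2024-06-01",                        // from list-versions
  ],
  { stdio: "inherit" },
);
```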
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-policy", description: diff --git a/src/aws/medialive.ts b/src/aws/medialive.ts index 81b170204678..eaaf70b96d49 100644 --- a/src/aws/medialive.ts +++ b/src/aws/medialive.ts @@ -373,6 +373,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--anywhere-settings", + description: "The Elemental Anywhere settings for this channel", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -487,6 +494,21 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--input-network-location", + description: + "The location of this input. AWS, for an input existing in the AWS Cloud, On-Prem for\nan input in a customer network", + args: { + name: "string", + }, + }, + { + name: "--multicast-settings", + description: "Multicast Input settings", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -1870,7 +1892,7 @@ const completionSpec: Fig.Spec = { { name: "--codec", description: - "Filter by codec, 'AVC', 'HEVC', 'MPEG2', 'AUDIO', or 'LINK'", + "Filter by codec, 'AVC', 'HEVC', 'MPEG2', 'AUDIO', 'LINK', or 'AV1'", args: { name: "string", }, @@ -2000,7 +2022,7 @@ const completionSpec: Fig.Spec = { { name: "--codec", description: - "Filter by codec, 'AVC', 'HEVC', 'MPEG2', 'AUDIO', or 'LINK'", + "Filter by codec, 'AVC', 'HEVC', 'MPEG2', 'AUDIO', 'LINK', or 'AV1'", args: { name: "string", }, @@ -2800,6 +2822,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--multicast-settings", + description: "Multicast Input settings", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -4582,220 +4611,1508 @@ const completionSpec: Fig.Spec = { ], }, { - name: "wait", + name: "create-channel-placement-group", description: - "Wait until a particular condition is satisfied. Each subcommand polls an API until the listed requirement is met", - subcommands: [ + "Create a ChannelPlacementGroup in the specified Cluster. As part of the create operation, you specify the Nodes to attach the group to.After you create a ChannelPlacementGroup, you add Channels to the group (you do this by modifying the Channels to add them to a specific group). You now have an association of Channels to ChannelPlacementGroup, and ChannelPlacementGroup to Nodes. This association means that all the Channels in the group are able to run on any of the Nodes associated with the group", + options: [ { - name: "channel-created", + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--name", description: - "Wait until a channel has been created It will poll every 3 seconds until a successful state has been reached. This will exit with a return code of 255 after 5 failed checks", - options: [ - { - name: "--channel-id", - description: "Channel ID", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "Specify a name that is unique in the Cluster. You can't change the name. Names are case-sensitive", + args: { + name: "string", + }, }, { - name: "channel-deleted", + name: "--nodes", description: - "Wait until a channel has been deleted It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 84 failed checks", - options: [ - { - name: "--channel-id", - description: "Channel ID", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "An array of one ID for the Node that you want to associate with the ChannelPlacementGroup. (You can't associate more than one Node with the ChannelPlacementGroup.) The Node and the ChannelPlacementGroup must be in the same Cluster", + args: { + name: "list", + }, }, { - name: "channel-running", + name: "--request-id", description: - "Wait until a channel is running It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 120 failed checks", - options: [ - { - name: "--channel-id", - description: "Channel ID", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "An ID that you assign to a create request. This ID ensures idempotency when creating resources. the request", + args: { + name: "string", + }, }, { - name: "channel-stopped", + name: "--tags", + description: "A collection of key-value pairs", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", description: - "Wait until a channel has is stopped It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 60 failed checks", - options: [ - { - name: "--channel-id", - description: "Channel ID", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, }, { - name: "input-attached", + name: "--generate-cli-skeleton", description: - "Wait until an input has been attached It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", - options: [ - { - name: "--input-id", - description: "Unique ID of the input", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, }, + ], + }, + { + name: "create-cluster", + description: "Create a new Cluster", + options: [ { - name: "input-deleted", + name: "--cluster-type", description: - "Wait until an input has been deleted It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", - options: [ - { - name: "--input-id", - description: "Unique ID of the input", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", - args: { - name: "string", - suggestions: ["input", "output"], - }, - }, - ], + "Specify a type. All the Nodes that you later add to this Cluster must be this type of hardware. One Cluster instance can't contain different hardware types. You won't be able to change this parameter after you create the Cluster", + args: { + name: "string", + }, }, { - name: "input-detached", + name: "--instance-role-arn", description: - "Wait until an input has been detached It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 84 failed checks", - options: [ - { - name: "--input-id", - description: "Unique ID of the input", - args: { - name: "string", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", - args: { - name: "string", - }, - }, - { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + "The ARN of the IAM role for the Node in this Cluster. The role must include all the operations that you expect these Node to perform. If necessary, create a role in IAM, then attach it here", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Specify a name that is unique in the AWS account. 
We recommend that you assign a name that hints at the types of Nodes in the Cluster. Names are case-sensitive",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--network-settings",
+          description:
+            "Network settings that connect the Nodes in the Cluster to one or more of the Networks that the Cluster is associated with",
+          args: {
+            name: "structure",
+          },
+        },
+        {
+          name: "--request-id",
+          description: "The unique ID of the request",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--tags",
+          description: "A collection of key-value pairs",
+          args: {
+            name: "map",
+          },
+        },
+        {
+          name: "--cli-input-json",
+          description:
+            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--generate-cli-skeleton",
+          description:
+            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command",
+          args: {
+            name: "string",
+            suggestions: ["input", "output"],
+          },
+        },
+      ],
+    },
+    {
+      name: "create-network",
+      description:
+        "Create as many Networks as you need. You will associate one or more Clusters with each Network. Each Network provides MediaLive Anywhere with required information about the network in your organization that you are using for video encoding using MediaLive",
+      options: [
+        {
+          name: "--ip-pools",
+          description:
+            "An array of IpPoolCreateRequests that identify a collection of IP addresses in your network that you want to reserve for use in MediaLive Anywhere. MediaLive Anywhere uses these IP addresses for Push inputs (in both Bridge and NAT networks) and for output destinations (only in Bridge networks). Each IpPoolUpdateRequest specifies one CIDR block",
+          args: {
+            name: "list",
+          },
+        },
+        {
+          name: "--name",
+          description:
+            "Specify a name that is unique in the AWS account. We recommend that you assign a name that hints at the type of traffic on the network. Names are case-sensitive",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--request-id",
+          description:
+            "An ID that you assign to a create request. This ID ensures idempotency when creating resources",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--routes",
+          description:
+            "An array of routes that MediaLive Anywhere needs to know about in order to route encoding traffic",
+          args: {
+            name: "list",
+          },
+        },
+        {
+          name: "--tags",
+          description: "A collection of key-value pairs",
+          args: {
+            name: "map",
+          },
+        },
+        {
+          name: "--cli-input-json",
+          description:
+            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--generate-cli-skeleton",
+          description:
+            "Prints a JSON skeleton to standard output without sending an API request. 
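A rough create-network call using the options above. The shorthand member names (Cidr, Gateway) inside --ip-pools and --routes are assumptions about the MediaLive Anywhere request shapes and should be checked against the generated skeleton; the network name and addresses are placeholders:

```ts
// Sketch: register an on-premises network with one reserved IP pool and one route.
import { execFileSync } from "node:child_process";

execFileSync(
  "aws",
  [
    "medialive", "create-network",
    "--name", "plant-floor-network",
    // One CIDR block per IpPoolCreateRequest, per the description above.
    "--ip-pools", "Cidr=10.10.20.0/24",
    "--routes", "Cidr=10.10.30.0/24,Gateway=10.10.20.1",
  ],
  { stdio: "inherit" },
);
```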
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "create-node", + description: + "Create a Node in the specified Cluster. You can also create Nodes using the CreateNodeRegistrationScript. Note that you can't move a Node to another Cluster", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The user-specified name of the Node to be created", + args: { + name: "string", + }, + }, + { + name: "--node-interface-mappings", + description: "Documentation update needed", + args: { + name: "list", + }, + }, + { + name: "--request-id", + description: + "An ID that you assign to a create request. This ID ensures idempotency when creating resources", + args: { + name: "string", + }, + }, + { + name: "--role", + description: + "The initial role of the Node in the Cluster. ACTIVE means the Node is available for encoding. BACKUP means the Node is a redundant Node and might get used if an ACTIVE Node fails", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "A collection of key-value pairs", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "create-node-registration-script", + description: + "Create the Register Node script for all the nodes intended for a specific Cluster. You will then run the script on each hardware unit that is intended for that Cluster. The script creates a Node in the specified Cluster. It then binds the Node to this hardware unit, and activates the node hardware for use with MediaLive Anywhere", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--id", + description: + "If you're generating a re-registration script for an already existing node, this is where you provide the id", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Specify a pattern for MediaLive Anywhere to use to assign a name to each Node in the Cluster. 
The pattern can include the variables $hn (hostname of the node hardware) and $ts for the date and time that the Node is created, in UTC (for example, 2024-08-20T23:35:12Z)", + args: { + name: "string", + }, + }, + { + name: "--node-interface-mappings", + description: "Documentation update needed", + args: { + name: "list", + }, + }, + { + name: "--request-id", + description: + "An ID that you assign to a create request. This ID ensures idempotency when creating resources", + args: { + name: "string", + }, + }, + { + name: "--role", + description: + "The initial role of the Node in the Cluster. ACTIVE means the Node is available for encoding. BACKUP means the Node is a redundant Node and might get used if an ACTIVE Node fails", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-channel-placement-group", + description: + "Delete the specified ChannelPlacementGroup that exists in the specified Cluster", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-cluster", + description: "Delete a Cluster. The Cluster must be idle", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
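Tying together the create-node-registration-script options described above: the --name value is a pattern, so a single script can name every Node after its host and registration time using the $hn and $ts variables. The cluster ID and name prefix below are placeholders:

```ts
// Sketch: generate one registration script whose Nodes come up as redundant
// (BACKUP) and are named like "edge-<hostname>-2024-08-20T23:35:12Z".
import { execFileSync } from "node:child_process";

execFileSync(
  "aws",
  [
    "medialive", "create-node-registration-script",
    "--cluster-id", "EXAMPLE-CLUSTER-ID",
    "--name", "edge-$hn-$ts",   // passed literally; no shell expansion with execFileSync
    "--role", "BACKUP",         // ACTIVE or BACKUP, per the description above
  ],
  { stdio: "inherit" },
);
```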
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-network", + description: + "Delete a Network. The Network must have no resources associated with it", + options: [ + { + name: "--network-id", + description: "The ID of the network", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-node", + description: "Delete a Node. The Node must be IDLE", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--node-id", + description: "The ID of the node", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-channel-placement-group", + description: "Get details about a ChannelPlacementGroup", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-cluster", + description: "Get details about a Cluster", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-network", + description: "Get details about a Network", + options: [ + { + name: "--network-id", + description: "The ID of the network", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-node", + description: "Get details about a Node in the specified Cluster", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--node-id", + description: "The ID of the node", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-channel-placement-groups", + description: + "Retrieve the list of ChannelPlacementGroups in the specified Cluster", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of items to return", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next page of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-clusters", + description: "Retrieve the list of Clusters", + options: [ + { + name: "--max-results", + description: "The maximum number of items to return", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next page of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-networks", + description: "Retrieve the list of Networks", + options: [ + { + name: "--max-results", + description: "The maximum number of items to return", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next page of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-nodes", + description: "Retrieve the list of Nodes", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of items to return", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next page of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-channel-placement-group", + description: "Change the settings for a ChannelPlacementGroup", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Include this parameter only if you want to change the current name of the ChannelPlacementGroup. Specify a name that is unique in the Cluster. You can't change the name. Names are case-sensitive", + args: { + name: "string", + }, + }, + { + name: "--nodes", + description: + "Include this parameter only if you want to change the list of Nodes that are associated with the ChannelPlacementGroup", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-cluster", + description: "Change the settings for a Cluster", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Include this parameter only if you want to change the current name of the Cluster. Specify a name that is unique in the AWS account. You can't change the name. 
Names are case-sensitive", + args: { + name: "string", + }, + }, + { + name: "--network-settings", + description: + "Include this property only if you want to change the current connections between the Nodes in the Cluster and the Networks the Cluster is associated with", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-network", + description: "Change the settings for a Network", + options: [ + { + name: "--ip-pools", + description: + "Include this parameter only if you want to change the pool of IP addresses in the network. An array of IpPoolCreateRequests that identify a collection of IP addresses in this network that you want to reserve for use in MediaLive Anywhere. MediaLive Anywhere uses these IP addresses for Push inputs (in both Bridge and NAT networks) and for output destinations (only in Bridge networks). Each IpPoolUpdateRequest specifies one CIDR block", + args: { + name: "list", + }, + }, + { + name: "--name", + description: + "Include this parameter only if you want to change the name of the Network. Specify a name that is unique in the AWS account. Names are case-sensitive", + args: { + name: "string", + }, + }, + { + name: "--network-id", + description: "The ID of the network", + args: { + name: "string", + }, + }, + { + name: "--routes", + description: + "Include this parameter only if you want to change or add routes in the Network. An array of Routes that MediaLive Anywhere needs to know about in order to route encoding traffic", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", +          args: { +            name: "string", +            suggestions: ["input", "output"], +          }, +        }, +      ], +    }, +    { +      name: "update-node", +      description: "Change the settings for a Node", +      options: [ +        { +          name: "--cluster-id", +          description: "The ID of the cluster", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--name", +          description: +            "Include this parameter only if you want to change the current name of the Node. Specify a name that is unique in the Cluster. You can't change the name. Names are case-sensitive", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--node-id", +          description: "The ID of the node", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--role", +          description: +            "The initial role of the Node in the Cluster. ACTIVE means the Node is available for encoding. BACKUP means the Node is a redundant Node and might get used if an ACTIVE Node fails", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--cli-input-json", +          description: +            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--generate-cli-skeleton", +          description: +            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", +          args: { +            name: "string", +            suggestions: ["input", "output"], +          }, +        }, +      ], +    }, +    { +      name: "update-node-state", +      description: "Update the state of a node", +      options: [ +        { +          name: "--cluster-id", +          description: "The ID of the cluster", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--node-id", +          description: "The ID of the node", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--state", +          description: +            "The state to apply to the Node. Set to ACTIVE (COMMISSIONED) to indicate that the Node is deployable. MediaLive Anywhere will consider this node when it needs a Node to run a Channel on, or when it needs a Node to promote from a backup node to an active node. Set to DRAINING to isolate the Node so that MediaLive Anywhere won't use it", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--cli-input-json", +          description: +            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--generate-cli-skeleton", +          description: +            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "wait", + description: + "Wait until a particular condition is satisfied. Each subcommand polls an API until the listed requirement is met", + subcommands: [ + { + name: "channel-created", + description: + "Wait until a channel has been created It will poll every 3 seconds until a successful state has been reached. This will exit with a return code of 255 after 5 failed checks", + options: [ + { + name: "--channel-id", + description: "Channel ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "channel-deleted", + description: + "Wait until a channel has been deleted It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 84 failed checks", + options: [ + { + name: "--channel-id", + description: "Channel ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "channel-placement-group-assigned", + description: + "Wait until the channel placement group has been assigned It will poll every 3 seconds until a successful state has been reached. This will exit with a return code of 255 after 5 failed checks", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "channel-placement-group-deleted", + description: + "Wait until the channel placement group has been deleted It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "channel-placement-group-unassigned", + description: + "Wait until the channel placement group has been unassigned It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--channel-placement-group-id", + description: "The ID of the channel placement group", + args: { + name: "string", + }, + }, + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", +          args: { +            name: "string", +            suggestions: ["input", "output"], +          }, +        }, +      ], +    }, +    { +      name: "channel-running", +      description: +        "Wait until a channel is running It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 120 failed checks", +      options: [ +        { +          name: "--channel-id", +          description: "Channel ID", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--cli-input-json", +          description: +            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--generate-cli-skeleton", +          description: +            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", +          args: { +            name: "string", +            suggestions: ["input", "output"], +          }, +        }, +      ], +    }, +    { +      name: "channel-stopped", +      description: +        "Wait until a channel is stopped It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 60 failed checks", +      options: [ +        { +          name: "--channel-id", +          description: "Channel ID", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--cli-input-json", +          description: +            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--generate-cli-skeleton", +          description: +            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", +          args: { +            name: "string", +            suggestions: ["input", "output"], +          }, +        }, +      ], +    }, +    { +      name: "cluster-created", +      description: +        "Wait until a cluster has been created It will poll every 3 seconds until a successful state has been reached. This will exit with a return code of 255 after 5 failed checks", +      options: [ +        { +          name: "--cluster-id", +          description: "The ID of the cluster", +          args: { +            name: "string", +          }, +        }, +        { +          name: "--cli-input-json", +          description: +            "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "cluster-deleted", + description: + "Wait until a cluster has been deleted It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "input-attached", + description: + "Wait until an input has been attached It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--input-id", + description: "Unique ID of the input", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "input-deleted", + description: + "Wait until an input has been deleted It will poll every 5 seconds until a successful state has been reached. 
This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--input-id", + description: "Unique ID of the input", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "input-detached", + description: + "Wait until an input has been detached It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 84 failed checks", + options: [ + { + name: "--input-id", + description: "Unique ID of the input", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", args: { name: "string", suggestions: ["input", "output"], @@ -4927,6 +6244,82 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "node-deregistered", + description: + "Wait until a node has been deregistered It will poll every 5 seconds until a successful state has been reached. This will exit with a return code of 255 after 20 failed checks", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--node-id", + description: "The ID of the node", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "node-registered", + description: + "Wait until a node has been registered It will poll every 3 seconds until a successful state has been reached. This will exit with a return code of 255 after 5 failed checks", + options: [ + { + name: "--cluster-id", + description: "The ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--node-id", + description: "The ID of the node", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "signal-map-created", description: diff --git a/src/aws/omics.ts b/src/aws/omics.ts index bcece7d8f924..53a223c9fb21 100644 --- a/src/aws/omics.ts +++ b/src/aws/omics.ts @@ -540,7 +540,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-run-group", - description: "Creates a run group", + description: + "You can optionally create a run group to limit the compute resources for the runs that you add to the group", options: [ { name: "--name", @@ -551,21 +552,24 @@ const completionSpec: Fig.Spec = { }, { name: "--max-cpus", - description: "The maximum number of CPUs to use in the group", + description: + "The maximum number of CPUs that can run concurrently across all active runs in the run group", args: { name: "integer", }, }, { name: "--max-runs", - description: "The maximum number of concurrent runs for the group", + description: + "The maximum number of runs that can be running at the same time", args: { name: "integer", }, }, { name: "--max-duration", - description: "A maximum run time for the group in minutes", + description: + "The maximum time for each run (in minutes). If a run exceeds the maximum run time, the run fails automatically", args: { name: "integer", }, @@ -587,7 +591,8 @@ const completionSpec: Fig.Spec = { }, { name: "--max-gpus", - description: "The maximum GPUs that can be used by a run group", + description: + "The maximum number of GPUs that can run concurrently across all active runs in the run group", args: { name: "integer", }, @@ -689,7 +694,7 @@ const completionSpec: Fig.Spec = { { name: "create-share", description: - "Creates a cross-account shared resource. The resource owner makes an offer to share the resource with the principal subscriber (an AWS user with a different account than the resource owner). 
The following resources support cross-account sharing: Healthomics variant stores Healthomics annotation stores Private workflows", + "Creates a cross-account shared resource. The resource owner makes an offer to share the resource with the principal subscriber (an AWS user with a different account than the resource owner). The following resources support cross-account sharing: HealthOmics variant stores HealthOmics annotation stores Private workflows", options: [ { name: "--resource-arn", @@ -845,7 +850,8 @@ const completionSpec: Fig.Spec = { }, { name: "--storage-capacity", - description: "The storage capacity for the workflow in gibibytes", + description: + "The default storage capacity for the workflow runs, in gibibytes", args: { name: "integer", }, diff --git a/src/aws/organizations.ts b/src/aws/organizations.ts index 27aecda13701..0e0b5e260e57 100644 --- a/src/aws/organizations.ts +++ b/src/aws/organizations.ts @@ -110,7 +110,7 @@ const completionSpec: Fig.Spec = { { name: "close-account", description: - "Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizationsin the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide", + "Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. 
To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can't close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizations in the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide", options: [ { name: "--account-id", @@ -142,7 +142,7 @@ const completionSpec: Fig.Spec = { { name: "create-account", description: - "Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. 
When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools", + "Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. 
For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. It isn't recommended to use CreateAccount to create multiple temporary accounts, and using the CreateAccount API to close accounts is subject to a 30-day usage quota. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools", options: [ { name: "--email", @@ -955,7 +955,7 @@ const completionSpec: Fig.Spec = { { name: "enable-aws-service-access", description: - "Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts. We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features", + "Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations. 
We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features", options: [ { name: "--service-principal", @@ -1099,7 +1099,7 @@ const completionSpec: Fig.Spec = { { name: "leave-organization", description: - "Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead. This operation can be called only from a member account in the organization. The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization. You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear. Choose a support plan Provide and verify the required contact information Provide a current payment method Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization. You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide. After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags. A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days. 
If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization", + "Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead. This operation can be called only from a member account in the organization. The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization. You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear. Choose a support plan Provide and verify the required contact information Provide a current payment method Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization. You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide. After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags. A newly created account has a waiting period before it can be removed from its organization. You must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period. If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization", options: [ { name: "--cli-input-json", diff --git a/src/aws/pca-connector-scep.ts b/src/aws/pca-connector-scep.ts index ae00c5c7e0ab..c1433789a18a 100644 --- a/src/aws/pca-connector-scep.ts +++ b/src/aws/pca-connector-scep.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "pca-connector-scep", description: - "Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change. Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. 
For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide", + "Connector for SCEP creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide", subcommands: [ { name: "create-challenge", diff --git a/src/aws/pcs.ts b/src/aws/pcs.ts new file mode 100644 index 000000000000..e2ebc929dea4 --- /dev/null +++ b/src/aws/pcs.ts @@ -0,0 +1,1033 @@ +const completionSpec: Fig.Spec = { + name: "pcs", + description: + "Amazon Web Services Parallel Computing Service (Amazon Web Services PCS) is a managed service that makes it easier for you to run and scale your high performance computing (HPC) workloads, and build scientific and engineering models on Amazon Web Services using Slurm. For more information, see the Amazon Web Services Parallel Computing Service User Guide. This reference describes the actions and data types of the service management API. You can use the Amazon Web Services SDKs to call the API actions in software, or use the Command Line Interface (CLI) to call the API actions manually. These API actions manage the service through an Amazon Web Services account. The API actions operate on Amazon Web Services PCS resources. A resource is an entity in Amazon Web Services that you can work with. Amazon Web Services services create resources when you use the features of the service. Examples of Amazon Web Services PCS resources include clusters, compute node groups, and queues. For more information about resources in Amazon Web Services, see Resource in the Resource Explorer User Guide. An Amazon Web Services PCS compute node is an Amazon EC2 instance. You don't launch compute nodes directly. Amazon Web Services PCS uses configuration information that you provide to launch compute nodes in your Amazon Web Services account. You receive billing charges for your running compute nodes. Amazon Web Services PCS automatically terminates your compute nodes when you delete the Amazon Web Services PCS resources related to those compute nodes", + subcommands: [ + { + name: "create-cluster", + description: + "Creates a cluster in your account. Amazon Web Services PCS creates the cluster controller in a service-owned account. The cluster controller communicates with the cluster resources in your account. The subnets and security groups for the cluster must already exist before you use this API action. It takes time for Amazon Web Services PCS to create the cluster. The cluster is in a Creating state until it is ready to use. There can only be 1 cluster in a Creating state per Amazon Web Services Region per Amazon Web Services account. CreateCluster fails with a ServiceQuotaExceededException if there is already a cluster in a Creating state", + options: [ + { + name: "--cluster-name", + description: "A name to identify the cluster. Example: MyCluster", + args: { + name: "string", + }, + }, + { + name: "--scheduler", + description: + "The cluster management and job scheduling software associated with the cluster", + args: { + name: "structure", + }, + }, + { + name: "--size", + description: + "A value that determines the maximum number of compute nodes in the cluster and the maximum number of jobs (active and queued). 
SMALL: 32 compute nodes and 256 jobs MEDIUM: 512 compute nodes and 8192 jobs LARGE: 2048 compute nodes and 16,384 jobs", + args: { + name: "string", + }, + }, + { + name: "--networking", + description: + "The networking configuration used to set up the cluster's control plane", + args: { + name: "structure", + }, + }, + { + name: "--slurm-configuration", + description: "Additional options related to the Slurm scheduler", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "create-compute-node-group", + description: + "Creates a managed set of compute nodes. You associate a compute node group with a cluster through 1 or more Amazon Web Services PCS queues or as part of the login fleet. A compute node group includes the definition of the compute properties and lifecycle management. Amazon Web Services PCS uses the information you provide to this API action to launch compute nodes in your account. You can only specify subnets in the same Amazon VPC as your cluster. You receive billing charges for the compute nodes that Amazon Web Services PCS launches in your account. You must already have a launch template before you call this API. For more information, see Launch an instance from a launch template in the Amazon Elastic Compute Cloud User Guide for Linux Instances", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster to create a compute node group in", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-name", + description: "A name to identify the cluster. Example: MyCluster", + args: { + name: "string", + }, + }, + { + name: "--ami-id", + description: + "The ID of the Amazon Machine Image (AMI) that Amazon Web Services PCS uses to launch compute nodes (Amazon EC2 instances). 
If you don't provide this value, Amazon Web Services PCS uses the AMI ID specified in the custom launch template", + args: { + name: "string", + }, + }, + { + name: "--subnet-ids", + description: + "The list of subnet IDs where the compute node group launches instances. Subnets must be in the same VPC as the cluster", + args: { + name: "list", + }, + }, + { + name: "--purchase-option", + description: + "Specifies how EC2 instances are purchased on your behalf. Amazon Web Services PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand", + args: { + name: "string", + }, + }, + { + name: "--custom-launch-template", + description: + "An Amazon EC2 launch template Amazon Web Services PCS uses to launch compute nodes", + args: { + name: "structure", + }, + }, + { + name: "--iam-instance-profile-arn", + description: + "The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached in order to provision instances correctly. The resource identifier of the ARN must start with AWSPCS. For example, arn:aws:iam:123456789012:instance-profile/AWSPCSMyComputeNodeInstanceProfile", + args: { + name: "string", + }, + }, + { + name: "--scaling-configuration", + description: + "Specifies the boundaries of the compute node group auto scaling", + args: { + name: "structure", + }, + }, + { + name: "--instance-configs", + description: + "A list of EC2 instance configurations that Amazon Web Services PCS can provision in the compute node group", + args: { + name: "list", + }, + }, + { + name: "--spot-options", + description: + "Additional configuration when you specify SPOT as the purchaseOption for the CreateComputeNodeGroup API action", + args: { + name: "structure", + }, + }, + { + name: "--slurm-configuration", + description: "Additional options related to the Slurm scheduler", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "create-queue", + description: + "Creates a job queue. You must associate 1 or more compute node groups with the queue. You can associate 1 compute node group with multiple queues", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster for which to create a queue", + args: { + name: "string", + }, + }, + { + name: "--queue-name", + description: "A name to identify the queue", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-configurations", + description: + "The list of compute node group configurations to associate with the queue. Queues assign jobs to associated compute node groups", + args: { + name: "list", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-cluster", + description: + "Deletes a cluster and all its linked resources. You must delete all queues and compute node groups associated with the cluster before you can delete the cluster", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster to delete", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. 
If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-compute-node-group", + description: + "Deletes a compute node group. You must delete all queues associated with the compute node group first", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster of the compute node group", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-identifier", + description: "The name or ID of the compute node group to delete", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-queue", + description: + "Deletes a job queue. 
If the compute node group associated with this queue isn't associated with any other queues, Amazon Web Services PCS terminates all the compute nodes for this queue", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster of the queue", + args: { + name: "string", + }, + }, + { + name: "--queue-identifier", + description: "The name or ID of the queue to delete", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-cluster", + description: + "Returns detailed information about a running cluster in your account. This API action provides networking information, endpoint information for communication with the scheduler, and provisioning status", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster of the queue", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-compute-node-group", + description: + "Returns detailed information about a compute node group. 
This API action provides networking information, EC2 instance type, compute node group status, and scheduler (such as Slurm) configuration", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-identifier", + description: "The name or ID of the compute node group", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-queue", + description: + "Returns detailed information about a queue. The information includes the compute node groups that the queue uses to schedule jobs", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster of the queue", + args: { + name: "string", + }, + }, + { + name: "--queue-identifier", + description: "The name or ID of the queue", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-clusters", + description: "Returns a list of running clusters in your account", + options: [ + { + name: "--next-token", + description: + "The value of nextToken is a unique pagination token for each page of results returned. If nextToken is returned, there are more results available. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token returns an HTTP 400 InvalidToken error", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 10 results, and the maximum allowed page size is 100 results. 
A value of 0 uses the default", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-compute-node-groups", + description: + "Returns a list of all compute node groups associated with a cluster", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster to list compute node groups for", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "The value of nextToken is a unique pagination token for each page of results returned. If nextToken is returned, there are more results available. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token returns an HTTP 400 InvalidToken error", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 10 results, and the maximum allowed page size is 100 results. A value of 0 uses the default", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-queues", + description: "Returns a list of all queues associated with a cluster", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster to list queues for", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "The value of nextToken is a unique pagination token for each page of results returned. If nextToken is returned, there are more results available. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token returns an HTTP 400 InvalidToken error", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 10 results, and the maximum allowed page size is 100 results. A value of 0 uses the default", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-tags-for-resource", + description: + "Returns a list of all tags on an Amazon Web Services PCS resource", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the resource for which to list tags", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "register-compute-node-group-instance", + description: + "This API action isn't intended for you to use. 
Amazon Web Services PCS uses this API action to register the compute nodes it launches in your account", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster to register the compute node group instance in", + args: { + name: "string", + }, + }, + { + name: "--bootstrap-id", + description: "The client-generated token to allow for retries", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "tag-resource", + description: + "Adds or edits tags on an Amazon Web Services PCS resource. Each tag consists of a tag key and a tag value. The tag key and tag value are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value", + options: [ + { + name: "--resource-arn", + description: "The Amazon Resource Name (ARN) of the resource", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "untag-resource", + description: + "Deletes tags from an Amazon Web Services PCS resource. To delete a tag, specify the tag key and the Amazon Resource Name (ARN) of the Amazon Web Services PCS resource", + options: [ + { + name: "--resource-arn", + description: "The Amazon Resource Name (ARN) of the resource", + args: { + name: "string", + }, + }, + { + name: "--tag-keys", + description: + "1 or more tag keys to remove from the resource. 
Specify only tag keys and not tag values", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-compute-node-group", + description: + "Updates a compute node group. You can update many of the fields related to your compute node group including the configurations for networking, compute nodes, and settings specific to your scheduler (such as Slurm)", + options: [ + { + name: "--cluster-identifier", + description: + "The name or ID of the cluster of the compute node group", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-identifier", + description: "The name or ID of the compute node group", + args: { + name: "string", + }, + }, + { + name: "--ami-id", + description: + "The ID of the Amazon Machine Image (AMI) that Amazon Web Services PCS uses to launch instances. If not provided, Amazon Web Services PCS uses the AMI ID specified in the custom launch template", + args: { + name: "string", + }, + }, + { + name: "--subnet-ids", + description: + "The list of subnet IDs where the compute node group provisions instances. The subnets must be in the same VPC as the cluster", + args: { + name: "list", + }, + }, + { + name: "--custom-launch-template", + description: + "An Amazon EC2 launch template Amazon Web Services PCS uses to launch compute nodes", + args: { + name: "structure", + }, + }, + { + name: "--purchase-option", + description: + "Specifies how EC2 instances are purchased on your behalf. Amazon Web Services PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand", + args: { + name: "string", + }, + }, + { + name: "--spot-options", + description: + "Additional configuration when you specify SPOT as the purchaseOption for the CreateComputeNodeGroup API action", + args: { + name: "structure", + }, + }, + { + name: "--scaling-configuration", + description: + "Specifies the boundaries of the compute node group auto scaling", + args: { + name: "structure", + }, + }, + { + name: "--iam-instance-profile-arn", + description: + "The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. 
The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly", + args: { + name: "string", + }, + }, + { + name: "--slurm-configuration", + description: "Additional options related to the Slurm scheduler", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-queue", + description: + "Updates the compute node group configuration of a queue. Use this API to change the compute node groups that the queue can send jobs to", + options: [ + { + name: "--cluster-identifier", + description: "The name or ID of the cluster of the queue", + args: { + name: "string", + }, + }, + { + name: "--queue-identifier", + description: "The name or ID of the queue", + args: { + name: "string", + }, + }, + { + name: "--compute-node-group-configurations", + description: + "The list of compute node group configurations to associate with the queue. Queues assign jobs to associated compute node groups", + args: { + name: "list", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect. If you don't specify a client token, the CLI and SDK automatically generate 1 for you", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/personalize.ts b/src/aws/personalize.ts index 5fa71e08aa79..865df0ab57a3 100644 --- a/src/aws/personalize.ts +++ b/src/aws/personalize.ts @@ -859,7 +859,7 @@ const completionSpec: Fig.Spec = { { name: "create-solution", description: - "After you create a solution, you can\u2019t change its configuration. By default, all new solutions use automatic training. With automatic training, you incur training costs while your solution is active. You can't stop automatic training for a solution. To avoid unnecessary costs, make sure to delete the solution when you are finished. For information about training costs, see Amazon Personalize pricing. Creates the configuration for training a model (creating a solution version). This configuration includes the recipe to use for model training and optional training configuration, such as columns to use in training and feature transformation parameters. For more information about configuring a solution, see Creating and configuring a solution. By default, new solutions use automatic training to create solution versions every 7 days. You can change the training frequency. Automatic solution version creation starts one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. For more information, see Configuring automatic training. To turn off automatic training, set performAutoTraining to false. If you turn off automatic training, you must manually create a solution version by calling the CreateSolutionVersion operation. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion. After training completes you can evaluate model accuracy by calling GetSolutionMetrics. When you are satisfied with the solution version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API. Amazon Personalize doesn't support configuring the hpoObjective for solution hyperparameter optimization at this time. Status A solution can be in one of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > DELETE IN_PROGRESS To get the status of the solution, call DescribeSolution. If you use manual training, the status must be ACTIVE before you call CreateSolutionVersion. Related APIs ListSolutions CreateSolutionVersion DescribeSolution DeleteSolution ListSolutionVersions DescribeSolutionVersion", + "By default, all new solutions use automatic training. With automatic training, you incur training costs while your solution is active. To avoid unnecessary costs, when you are finished you can update the solution to turn off automatic training. 
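// A minimal, hypothetical sketch (not part of the generated pcs spec above): the spec
// models --cluster-identifier as a bare string argument. A Fig generator could instead
// suggest live cluster names by shelling out to the CLI this spec completes. This assumes
// the standard Fig.Generator API and that `aws pcs list-clusters` returns JSON shaped
// roughly like { clusters: [{ name, id, status }] }; both are assumptions, not verified.
const pcsClusterGenerator: Fig.Generator = {
  script: ["aws", "pcs", "list-clusters", "--output", "json"],
  postProcess: (out) => {
    try {
      const { clusters } = JSON.parse(out);
      return (clusters ?? []).map((c: { name: string; status?: string }) => ({
        name: c.name,
        description: c.status ? `Cluster (${c.status})` : "Cluster",
      }));
    } catch {
      // If the CLI call fails or the output is not JSON, fall back to no suggestions.
      return [];
    }
  },
};
// Hypothetical usage on an option: args: { name: "string", generators: pcsClusterGenerator }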
For information about training costs, see Amazon Personalize pricing. Creates the configuration for training a model (creating a solution version). This configuration includes the recipe to use for model training and optional training configuration, such as columns to use in training and feature transformation parameters. For more information about configuring a solution, see Creating and configuring a solution. By default, new solutions use automatic training to create solution versions every 7 days. You can change the training frequency. Automatic solution version creation starts within one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. For more information, see Configuring automatic training. To turn off automatic training, set performAutoTraining to false. If you turn off automatic training, you must manually create a solution version by calling the CreateSolutionVersion operation. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion. After training completes you can evaluate model accuracy by calling GetSolutionMetrics. When you are satisfied with the solution version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API. Amazon Personalize doesn't support configuring the hpoObjective for solution hyperparameter optimization at this time. Status A solution can be in one of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > DELETE IN_PROGRESS To get the status of the solution, call DescribeSolution. If you use manual training, the status must be ACTIVE before you call CreateSolutionVersion. Related APIs UpdateSolution ListSolutions CreateSolutionVersion DescribeSolution DeleteSolution ListSolutionVersions DescribeSolutionVersion", options: [ { name: "--name", @@ -891,12 +891,12 @@ const completionSpec: Fig.Spec = { { name: "--perform-auto-training", description: - "Whether the solution uses automatic training to create new solution versions (trained models). The default is True and the solution automatically creates new solution versions every 7 days. You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. For more information about automatic training, see Configuring automatic training. Automatic solution version creation starts one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", + "Whether the solution uses automatic training to create new solution versions (trained models). The default is True and the solution automatically creates new solution versions every 7 days. You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. For more information about automatic training, see Configuring automatic training. Automatic solution version creation starts within one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. 
After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", }, { name: "--no-perform-auto-training", description: - "Whether the solution uses automatic training to create new solution versions (trained models). The default is True and the solution automatically creates new solution versions every 7 days. You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. For more information about automatic training, see Configuring automatic training. Automatic solution version creation starts one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", + "Whether the solution uses automatic training to create new solution versions (trained models). The default is True and the solution automatically creates new solution versions every 7 days. You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. For more information about automatic training, see Configuring automatic training. Automatic solution version creation starts within one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", }, { name: "--recipe-arn", @@ -925,7 +925,7 @@ const completionSpec: Fig.Spec = { { name: "--solution-config", description: - "The configuration to use with the solution. When performAutoML is set to true, Amazon Personalize only evaluates the autoMLConfig section of the solution configuration. Amazon Personalize doesn't support configuring the hpoObjective at this time", + "The configuration properties for the solution. When performAutoML is set to true, Amazon Personalize only evaluates the autoMLConfig section of the solution configuration. Amazon Personalize doesn't support configuring the hpoObjective at this time", args: { name: "structure", }, @@ -3444,6 +3444,55 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-solution", + description: + "Updates an Amazon Personalize solution to use a different automatic training configuration. When you update a solution, you can change whether the solution uses automatic training, and you can change the training frequency. For more information about updating a solution, see Updating a solution. A solution update can be in one of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED To get the status of a solution update, call the DescribeSolution API operation and find the status in the latestSolutionUpdate", + options: [ + { + name: "--solution-arn", + description: + "The Amazon Resource Name (ARN) of the solution to update", + args: { + name: "string", + }, + }, + { + name: "--perform-auto-training", + description: + "Whether the solution uses automatic training to create new solution versions (trained models). 
You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. If you turn on automatic training, the first automatic training starts within one hour after the solution update completes. If you manually create a solution version within the hour, the solution skips the first automatic training. For more information about automatic training, see Configuring automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", + }, + { + name: "--no-perform-auto-training", + description: + "Whether the solution uses automatic training to create new solution versions (trained models). You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. If you turn on automatic training, the first automatic training starts within one hour after the solution update completes. If you manually create a solution version within the hour, the solution skips the first automatic training. For more information about automatic training, see Configuring automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion", + }, + { + name: "--solution-update-config", + description: "The new configuration details of the solution", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, ], }; export default completionSpec; diff --git a/src/aws/pipes.ts b/src/aws/pipes.ts index 35aa679a3cd7..c4149be319f8 100644 --- a/src/aws/pipes.ts +++ b/src/aws/pipes.ts @@ -96,6 +96,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--kms-key-identifier", + description: + "The identifier of the KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt pipe data. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. If you do not specify a customer managed key identifier, EventBridge uses an Amazon Web Services owned key to encrypt pipe data. 
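// A hedged sketch of the UpdateSolution call documented above, using the AWS SDK for
// JavaScript v3 rather than the CLI. The command name follows the SDK's per-operation
// naming convention, and the input member names (solutionArn, performAutoTraining) are
// assumed from the CLI options shown in this spec, not verified against the SDK typings.
// The ARN below is a placeholder.
import {
  PersonalizeClient,
  UpdateSolutionCommand,
} from "@aws-sdk/client-personalize";

const personalize = new PersonalizeClient({ region: "us-east-1" });

// Turn off automatic training for a solution you are finished with, to avoid
// further training costs while it remains active.
await personalize.send(
  new UpdateSolutionCommand({
    solutionArn:
      "arn:aws:personalize:us-east-1:123456789012:solution/MySolution",
    performAutoTraining: false,
  })
);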
For more information, see Managing keys in the Key Management Service Developer Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -520,6 +528,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--kms-key-identifier", + description: + "The identifier of the KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt pipe data. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. To update a pipe that is using the default Amazon Web Services owned key to use a customer managed key instead, or update a pipe that is using a customer managed key to use a different customer managed key, specify a customer managed key identifier. To update a pipe that is using a customer managed key to use the default Amazon Web Services owned key, specify an empty string. For more information, see Managing keys in the Key Management Service Developer Guide", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/qapps.ts b/src/aws/qapps.ts index 309726992442..422a2248c226 100644 --- a/src/aws/qapps.ts +++ b/src/aws/qapps.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "qapps", description: - "The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create an Q Appthat exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for marketing content-generation like writing customer emails and creating promotional content using a certain style of voice, tone, and branding. For more information, see Amazon Q App in the Amazon Q Business User Guide", + "The Amazon Q Apps feature capability within Amazon Q Business allows web experience users to create lightweight, purpose-built AI apps to fulfill specific tasks from within their web experience. For example, users can create a Q App that exclusively generates marketing-related content to improve your marketing team's productivity or a Q App for writing customer emails and creating promotional content using a certain style of voice, tone, and branding. For more information on the capabilities, see Amazon Q Apps capabilities in the Amazon Q Business User Guide. For an overview of the Amazon Q App APIs, see Overview of Amazon Q Apps API operations. 
For information about the IAM access control permissions you need to use the Amazon Q Apps API, see IAM role for the Amazon Q Business web experience including Amazon Q Apps in the Amazon Q Business User Guide", subcommands: [ { name: "associate-library-item-review", @@ -957,8 +957,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-library-item", - description: - "Updates the metadata and status of a library item for an Amazon Q App", + description: "Updates the library item for an Amazon Q App", options: [ { name: "--instance-id", @@ -1009,6 +1008,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-library-item-metadata", + description: + "Updates the verification status of a library item for an Amazon Q App", + options: [ + { + name: "--instance-id", + description: + "The unique identifier of the Amazon Q Business application environment instance", + args: { + name: "string", + }, + }, + { + name: "--library-item-id", + description: "The unique identifier of the updated library item", + args: { + name: "string", + }, + }, + { + name: "--is-verified", + description: "The verification status of the library item", + }, + { + name: "--no-is-verified", + description: "The verification status of the library item", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-q-app", description: diff --git a/src/aws/qbusiness.ts b/src/aws/qbusiness.ts index fd35be852149..59530a44e795 100644 --- a/src/aws/qbusiness.ts +++ b/src/aws/qbusiness.ts @@ -263,6 +263,22 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--identity-type", + description: + "The authentication type being used by a Amazon Q Business application", + args: { + name: "string", + }, + }, + { + name: "--iam-identity-provider-arn", + description: + "The Amazon Resource Name (ARN) of an identity provider being used by an Amazon Q Business application", + args: { + name: "string", + }, + }, { name: "--identity-center-instance-arn", description: @@ -271,6 +287,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--client-ids-for-oidc", + description: "The OIDC client ID for a Amazon Q Business application", + args: { + name: "list", + }, + }, { name: "--description", description: "A description for the Amazon Q Business application", @@ -376,7 +399,7 @@ const completionSpec: Fig.Spec = { { name: "--configuration", description: - "Configuration information to connect to your data source repository. For configuration templates for your specific data source, see Supported connectors", + "Configuration information to connect your data source repository to Amazon Q Business. 
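// A hypothetical refinement (not part of this patch): boolean parameters such as
// isVerified in update-library-item-metadata are generated as a --flag / --no-flag pair.
// Fig's `exclusiveOn` option property could mark the pair mutually exclusive so that once
// one form is typed the other is no longer suggested. Sketch only; the options added in
// this diff do not set it.
const isVerifiedOptions: Fig.Option[] = [
  {
    name: "--is-verified",
    description: "The verification status of the library item",
    exclusiveOn: ["--no-is-verified"],
  },
  {
    name: "--no-is-verified",
    description: "The verification status of the library item",
    exclusiveOn: ["--is-verified"],
  },
];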
Use this parameter to provide a JSON schema with configuration information specific to your data source connector. Each data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas: Amazon S3 JSON schema Web Crawler JSON schema You can find configuration templates for your specific data source using the following steps: Navigate to the Supported connectors page in the Amazon Q Business User Guide, and select the data source of your choice. Then, from your specific data source connector page, select Using the API. You will find the JSON schema for your data source, including parameter descriptions, in this section", args: { name: "structure", }, @@ -791,7 +814,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of the service role attached to your web experience", + "The Amazon Resource Name (ARN) of the service role attached to your web experience. You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value", args: { name: "string", }, @@ -812,6 +835,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--identity-provider-configuration", + description: + "Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -2636,7 +2667,7 @@ const completionSpec: Fig.Spec = { { name: "--group-name", description: - 'The list that contains your users or sub groups that belong the same group. For example, the group "Company" includes the user "CEO" and the sub groups "Research", "Engineering", and "Sales and Marketing". If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000', + 'The list that contains your users or sub groups that belong the same group. For example, the group "Company" includes the user "CEO" and the sub groups "Research", "Engineering", and "Sales and Marketing"', args: { name: "string", }, @@ -2922,6 +2953,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--auto-subscription-configuration", + description: + "An option to enable updating the default subscription type assigned to an Amazon Q Business application using IAM identity federation for user management", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -3441,6 +3480,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--identity-provider-configuration", + description: + "Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/rds.ts b/src/aws/rds.ts index bf48f0a991e0..8fcdc50e5eab 100644 --- a/src/aws/rds.ts +++ b/src/aws/rds.ts @@ -1634,7 +1634,7 @@ const completionSpec: Fig.Spec = { { name: "--engine", description: - "The database engine to use for this DB instance. Not every database engine is available in every Amazon Web Services Region. 
Valid Values: aurora-mysql (for Aurora MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB instances) custom-oracle-ee (for RDS Custom for Oracle DB instances) custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) custom-oracle-se2 (for RDS Custom for Oracle DB instances) custom-oracle-se2-cdb (for RDS Custom for Oracle DB instances) custom-sqlserver-ee (for RDS Custom for SQL Server DB instances) custom-sqlserver-se (for RDS Custom for SQL Server DB instances) custom-sqlserver-web (for RDS Custom for SQL Server DB instances) db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web", + "The database engine to use for this DB instance. Not every database engine is available in every Amazon Web Services Region. Valid Values: aurora-mysql (for Aurora MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB instances) custom-oracle-ee (for RDS Custom for Oracle DB instances) custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) custom-oracle-se2 (for RDS Custom for Oracle DB instances) custom-oracle-se2-cdb (for RDS Custom for Oracle DB instances) custom-sqlserver-ee (for RDS Custom for SQL Server DB instances) custom-sqlserver-se (for RDS Custom for SQL Server DB instances) custom-sqlserver-web (for RDS Custom for SQL Server DB instances) custom-sqlserver-dev (for RDS Custom for SQL Server DB instances) db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web", args: { name: "string", }, @@ -1682,7 +1682,7 @@ const completionSpec: Fig.Spec = { { name: "--db-subnet-group-name", description: - "A DB subnet group to associate with this DB instance. Constraints: Must match the name of an existing DB subnet group. Must not be default. Example: mydbsubnetgroup", + "A DB subnet group to associate with this DB instance. Constraints: Must match the name of an existing DB subnet group. Example: mydbsubnetgroup", args: { name: "string", }, @@ -1758,7 +1758,7 @@ const completionSpec: Fig.Spec = { { name: "--license-model", description: - "The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license", + "The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. 
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license", args: { name: "string", }, @@ -2313,7 +2313,7 @@ const completionSpec: Fig.Spec = { { name: "--pre-signed-url", description: - "When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process. 
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas. This setting doesn't apply to RDS Custom DB instances", + "When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process. 
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. This setting doesn't apply to RDS Custom DB instances", args: { name: "string", }, @@ -3165,6 +3165,13 @@ const completionSpec: Fig.Spec = { description: "Specifies whether to enable storage encryption for the new global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the setting from the source DB cluster", }, + { + name: "--tags", + description: "Tags to assign to the global cluster", + args: { + name: "list", + }, + }, { name: "--cli-input-json", description: @@ -8304,7 +8311,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide", + "Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide", options: [ { name: "--resource-name", @@ -10822,7 +10829,7 @@ const completionSpec: Fig.Spec = { { name: "remove-tags-from-resource", description: - "Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide", + "Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide", options: [ { name: "--resource-name", @@ -11914,7 +11921,7 @@ const completionSpec: Fig.Spec = { { name: "--license-model", description: - "License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source", + "License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. 
Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source", args: { name: "string", }, @@ -12763,7 +12770,7 @@ const completionSpec: Fig.Spec = { { name: "--license-model", description: - "The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source", + "The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source", args: { name: "string", }, diff --git a/src/aws/redshift-data.ts b/src/aws/redshift-data.ts index 8c7ce89e2beb..67bed85724bb 100644 --- a/src/aws/redshift-data.ts +++ b/src/aws/redshift-data.ts @@ -48,6 +48,21 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--session-id", + description: "The session identifier of the query", + args: { + name: "string", + }, + }, + { + name: "--session-keep-alive-seconds", + description: + "The number of seconds to keep the session alive after the query finishes. The maximum time a session can keep alive is 24 hours. After 24 hours, the session is forced closed and the query is terminated", + args: { + name: "integer", + }, + }, { name: "--sqls", description: @@ -345,6 +360,21 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--session-id", + description: "The session identifier of the query", + args: { + name: "string", + }, + }, + { + name: "--session-keep-alive-seconds", + description: + "The number of seconds to keep the session alive after the query finishes. The maximum time a session can keep alive is 24 hours. 
After 24 hours, the session is forced closed and the query is terminated", + args: { + name: "integer", + }, + }, { name: "--sql", description: "The SQL statement text to run", diff --git a/src/aws/runtime.sagemaker.ts b/src/aws/runtime.sagemaker.ts index 98a3b7b48213..4536a10e2cbc 100644 --- a/src/aws/runtime.sagemaker.ts +++ b/src/aws/runtime.sagemaker.ts @@ -94,6 +94,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--session-id", + description: + "Creates a stateful session or identifies an existing one. You can do one of the following: Create a stateful session by specifying the value NEW_SESSION. Send your request to an existing stateful session by specifying the ID of that session. With a stateful session, you can send multiple requests to a stateful model. When you create a session with a stateful model, the model must create the session ID and set the expiration time. The model must also provide that information in the response to your request. You can get the ID and timestamp from the NewSessionId response parameter. For any subsequent request where you specify that session ID, SageMaker routes the request to the same instance that supports the session", + args: { + name: "string", + }, + }, { name: "outfile", description: "Filename where the content will be saved", diff --git a/src/aws/s3control.ts b/src/aws/s3control.ts index 4c0b6d1c53c9..282a3dbe06ef 100644 --- a/src/aws/s3control.ts +++ b/src/aws/s3control.ts @@ -11,7 +11,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -51,7 +51,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -139,7 +139,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -187,7 +187,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -675,7 +675,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -715,7 +715,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -747,7 +747,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -779,7 +779,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 
Access Grants instance", args: { name: "string", }, @@ -1492,7 +1492,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -1524,7 +1524,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -1559,12 +1559,12 @@ const completionSpec: Fig.Spec = { { name: "get-access-grants-instance", description: - "Retrieves the S3 Access Grants instance for a Region in your account. Permissions You must have the s3:GetAccessGrantsInstance permission to use this operation", + "Retrieves the S3 Access Grants instance for a Region in your account. Permissions You must have the s3:GetAccessGrantsInstance permission to use this operation. GetAccessGrantsInstance is not supported for cross-account access. You can only call the API from the account that owns the S3 Access Grants instance", options: [ { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -1636,7 +1636,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -1668,7 +1668,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -2225,7 +2225,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -2644,7 +2644,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -2732,7 +2732,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -2780,7 +2780,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -2956,6 +2956,96 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-caller-access-grants", + description: + "Returns a list of the access grants that were given to the caller using S3 Access Grants and that allow the caller to access the S3 data of the Amazon Web Services account specified in the request. 
Permissions You must have the s3:ListCallerAccessGrants permission to use this operation", + options: [ + { + name: "--account-id", + description: + "The Amazon Web Services account ID of the S3 Access Grants instance", + args: { + name: "string", + }, + }, + { + name: "--grant-scope", + description: + "The S3 path of the data that you would like to access. Must start with s3://. You can optionally pass only the beginning characters of a path, and S3 Access Grants will search for all applicable grants for the path fragment", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "A pagination token to request the next page of results. Pass this value into a subsequent List Caller Access Grants request in order to retrieve the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of access grants that you would like returned in the List Caller Access Grants response. If the results include the pagination token NextToken, make another call using the NextToken to determine if there are more results", + args: { + name: "integer", + }, + }, + { + name: "--allowed-by-application", + description: + "If this optional parameter is passed in the request, a filter is applied to the results. The results will include only the access grants for the caller's Identity Center application or for any other applications (ALL)", + }, + { + name: "--no-allowed-by-application", + description: + "If this optional parameter is passed in the request, a filter is applied to the results. The results will include only the access grants for the caller's Identity Center application or for any other applications (ALL)", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-jobs", description: @@ -3236,7 +3326,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, @@ -4055,7 +4145,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The ID of the Amazon Web Services account that is making this request", + "The Amazon Web Services account ID of the S3 Access Grants instance", args: { name: "string", }, diff --git a/src/aws/sagemaker-runtime.ts b/src/aws/sagemaker-runtime.ts index 082cb2b96af0..918338e264d5 100644 --- a/src/aws/sagemaker-runtime.ts +++ b/src/aws/sagemaker-runtime.ts @@ -94,6 +94,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--session-id", + description: + "Creates a stateful session or identifies an existing one. You can do one of the following: Create a stateful session by specifying the value NEW_SESSION. Send your request to an existing stateful session by specifying the ID of that session. With a stateful session, you can send multiple requests to a stateful model. When you create a session with a stateful model, the model must create the session ID and set the expiration time. The model must also provide that information in the response to your request. You can get the ID and timestamp from the NewSessionId response parameter. For any subsequent request where you specify that session ID, SageMaker routes the request to the same instance that supports the session", + args: { + name: "string", + }, + }, { name: "outfile", description: "Filename where the content will be saved", diff --git a/src/aws/sagemaker.ts b/src/aws/sagemaker.ts index 132049f89321..67af8a581599 100644 --- a/src/aws/sagemaker.ts +++ b/src/aws/sagemaker.ts @@ -781,6 +781,22 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--orchestrator", + description: + 'The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is "eks", which is to use an Amazon Elastic Kubernetes Service (EKS) cluster as the orchestrator', + args: { + name: "structure", + }, + }, + { + name: "--node-recovery", + description: + "The node recovery mode for the SageMaker HyperPod cluster. When set to Automatic, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to None, cluster administrators will need to manually manage any faulty cluster instances", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -4685,7 +4701,7 @@ const completionSpec: Fig.Spec = { { name: "--environment", description: - "The environment variables to set in the Docker container. 
We support up to 16 key and values entries in the map", + "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables. We support up to 16 key and values entries in the map", args: { name: "map", }, @@ -19188,6 +19204,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--node-recovery", + description: + "The node recovery mode to be applied to the SageMaker HyperPod cluster", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/securityhub.ts b/src/aws/securityhub.ts index d4750a8c06fe..40fa9111c012 100644 --- a/src/aws/securityhub.ts +++ b/src/aws/securityhub.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "securityhub", description: - "Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Services and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Services. In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. 
These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Services . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of central configuration operations, see the Central configuration terms and concepts section of the Security Hub User Guide. The following throttling limits apply to Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second", + "Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Servicesservices, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Servicesservices, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Servicesservices and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. 
You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Servicesservices. In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Servicesservices . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of central configuration operations, see the Central configuration terms and concepts section of the Security Hub User Guide. The following throttling limits apply to Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second", subcommands: [ { name: "accept-administrator-invitation", @@ -697,7 +697,7 @@ const completionSpec: Fig.Spec = { { name: "--region-linking-mode", description: - "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Indicates to aggregate findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. 
ALL_REGIONS_EXCEPT_SPECIFIED - Indicates to aggregate findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Indicates to aggregate findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions", + "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions", args: { name: "string", }, @@ -705,7 +705,7 @@ const completionSpec: Fig.Spec = { { name: "--regions", description: - "If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region", + "If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS", args: { name: "list", }, @@ -3198,7 +3198,7 @@ const completionSpec: Fig.Spec = { { name: "--region-linking-mode", description: - "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Indicates to aggregate findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Indicates to aggregate findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. 
SPECIFIED_REGIONS - Indicates to aggregate findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions", + "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions", args: { name: "string", }, @@ -3206,7 +3206,7 @@ const completionSpec: Fig.Spec = { { name: "--regions", description: - "If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region", + "If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS", args: { name: "list", }, @@ -3233,7 +3233,7 @@ const completionSpec: Fig.Spec = { { name: "update-findings", description: - "UpdateFindings is a deprecated operation. Instead of UpdateFindings, use the BatchUpdateFindings operation. Updates the Note and RecordState of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding. Finding updates made with UpdateFindings might not be persisted if the same finding is later updated by the finding provider through the BatchImportFindings operation", + "UpdateFindings is a deprecated operation. Instead of UpdateFindings, use the BatchUpdateFindings operation. The UpdateFindings operation updates the Note and RecordState of the Security Hub aggregated findings that the filter attributes specify. Any member account that can view the finding can also see the update to the finding. Finding updates made with UpdateFindings aren't persisted if the same finding is later updated by the finding provider through the BatchImportFindings operation. 
In addition, Security Hub doesn't record updates made with UpdateFindings in the finding history", options: [ { name: "--filters", diff --git a/src/aws/ssm.ts b/src/aws/ssm.ts index 0d3f6944f54b..7b575ad0a06e 100644 --- a/src/aws/ssm.ts +++ b/src/aws/ssm.ts @@ -180,7 +180,7 @@ const completionSpec: Fig.Spec = { { name: "create-activation", description: - "Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes", + "Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes", options: [ { name: "--description", @@ -201,7 +201,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role", description: - "The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role", + "The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in a hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role", args: { name: "string", }, @@ -217,7 +217,7 @@ const completionSpec: Fig.Spec = { { name: "--expiration-date", description: - 'The date by which this activation request should expire, in timestamp format, such as "2021-07-07T00:00:00". You can specify a date up to 30 days in advance. 
If you don\'t provide an expiration date, the activation code expires in 24 hours', + 'The date by which this activation request should expire, in timestamp format, such as "2024-07-07T00:00:00". You can specify a date up to 30 days in advance. If you don\'t provide an expiration date, the activation code expires in 24 hours', args: { name: "timestamp", }, @@ -296,7 +296,7 @@ const completionSpec: Fig.Spec = { { name: "--targets", description: - "The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see About targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide", + "The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see Understanding targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -937,7 +937,7 @@ const completionSpec: Fig.Spec = { { name: "--approved-patches", description: - "A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", + "A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -963,7 +963,7 @@ const completionSpec: Fig.Spec = { { name: "--rejected-patches", description: - "A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", + "A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -1028,7 +1028,7 @@ const completionSpec: Fig.Spec = { { name: "create-resource-data-sync", description: - "A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. 
You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide. A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy", + "A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creatinga a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide. A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy", options: [ { name: "--sync-name", @@ -2785,7 +2785,7 @@ const completionSpec: Fig.Spec = { { name: "--filters", description: - "Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide", + "Each element in the array is a structure containing a key-value pair. 
Supported keys for DescribeInstancePatches include the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -3181,7 +3181,7 @@ const completionSpec: Fig.Spec = { { name: "--filters", description: - "Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z", + "Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z", args: { name: "list", }, @@ -7214,7 +7214,7 @@ const completionSpec: Fig.Spec = { { name: "--service-role-arn", description: - "The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide", + "The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide", args: { name: "string", }, @@ -7738,7 +7738,7 @@ const completionSpec: Fig.Spec = { { name: "--targets", description: - "A key-value mapping to target resources. Required if you specify TargetParameterName", + "A key-value mapping to target resources. Required if you specify TargetParameterName. If both this parameter and the TargetLocation:Targets parameter are supplied, TargetLocation:Targets takes precedence", args: { name: "list", }, @@ -7754,7 +7754,7 @@ const completionSpec: Fig.Spec = { { name: "--max-concurrency", description: - "The maximum number of targets allowed to run this task in parallel.
You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10", + "The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10. If both this parameter and the TargetLocation:TargetsMaxConcurrency are supplied, TargetLocation:TargetsMaxConcurrency takes precedence", args: { name: "string", }, @@ -7762,7 +7762,7 @@ const completionSpec: Fig.Spec = { { name: "--max-errors", description: - "The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time", + "The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time. If this parameter and the TargetLocation:TargetsMaxErrors parameter are both supplied, TargetLocation:TargetsMaxErrors takes precedence", args: { name: "string", }, @@ -7770,7 +7770,7 @@ const completionSpec: Fig.Spec = { { name: "--target-locations", description: - "A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the Amazon Web Services Systems Manager User Guide", + "A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. 
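// Editor's note: the --max-concurrency and --max-errors descriptions above define the stop
// condition arithmetically. A minimal TypeScript sketch of that arithmetic follows; it is
// illustrative only and not part of the generated spec. The helper name and the use of
// Math.floor for percentage rounding are assumptions.
function allowedErrors(maxErrors: string, targetCount: number): number {
  // "--max-errors 10%" means a percentage of the target set; "--max-errors 10" is an absolute count.
  return maxErrors.endsWith("%")
    ? Math.floor((parseFloat(maxErrors) / 100) * targetCount)
    : parseInt(maxErrors, 10);
}
// Worked example from the description: 50 resources with max-errors of 10% allows 5 errors,
// so the system stops running the automation on additional targets when the sixth error is received.
const stopAtError = allowedErrors("10%", 50) + 1; // 6
console.log(stopAtError);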
For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -7791,6 +7791,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--target-locations-url", + description: + "Specify a publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -8745,7 +8753,7 @@ const completionSpec: Fig.Spec = { { name: "--service-role-arn", description: - "The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide", + "The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide", args: { name: "string", }, @@ -8873,7 +8881,7 @@ const completionSpec: Fig.Spec = { { name: "--iam-role", description: - "The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role", + "The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role", args: { name: "string", }, @@ -8953,7 +8961,7 @@ const completionSpec: Fig.Spec = { { name: "--status", description: - "The OpsItem status. Status can be Open, In Progress, or Resolved. 
For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide", + "The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide", args: { name: "string", }, @@ -9129,7 +9137,7 @@ const completionSpec: Fig.Spec = { { name: "--approved-patches", description: - "A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", + "A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, @@ -9155,7 +9163,7 @@ const completionSpec: Fig.Spec = { { name: "--rejected-patches", description: - "A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", + "A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide", args: { name: "list", }, diff --git a/src/aws/stepfunctions.ts b/src/aws/stepfunctions.ts index c87d7de4148a..473eec426ac5 100644 --- a/src/aws/stepfunctions.ts +++ b/src/aws/stepfunctions.ts @@ -1821,6 +1821,22 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--severity", + description: + "Minimum level of diagnostics to return. ERROR returns only ERROR diagnostics, whereas WARNING returns both WARNING and ERROR diagnostics. The default is ERROR", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of diagnostics that are returned per call. The default and maximum value is 100. Setting the value to 0 will also use the default of 100. If the number of diagnostics returned in the response exceeds maxResults, the value of the truncated field in the response will be set to true", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/storagegateway.ts b/src/aws/storagegateway.ts index 809ba453df54..e44ec66aacca 100644 --- a/src/aws/storagegateway.ts +++ b/src/aws/storagegateway.ts @@ -672,20 +672,28 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-type", + description: + "A value that specifies the type of server-side encryption that the file share will use for the data that it stores in Amazon S3. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. 
If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true", + args: { + name: "string", + }, + }, { name: "--kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--no-kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--kms-key", description: - "The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional", + "Optional. The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value must be set if KMSEncrypted is true, or if EncryptionType is SseKms or DsseKms", args: { name: "string", }, @@ -794,7 +802,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-policy", description: - 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', + 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. 
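// Editor's note: a minimal sketch (not part of the generated spec) of the EncryptionType /
// KMSEncrypted compatibility rule repeated in the descriptions above. The type alias and
// function name are hypothetical, introduced only for illustration.
type S3EncryptionType = "SseS3" | "SseKms" | "DsseKms";

function encryptionSettingsConflict(encryptionType: S3EncryptionType, kmsEncrypted: boolean): boolean {
  // SseS3 requires KMSEncrypted to be false; SseKms and DsseKms require it to be true.
  const kmsRequired = encryptionType === "SseKms" || encryptionType === "DsseKms";
  return kmsEncrypted !== kmsRequired;
}

console.log(encryptionSettingsConflict("SseS3", true));   // true  -> conflicting request
console.log(encryptionSettingsConflict("DsseKms", true)); // false -> consistent request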
Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. This setting is not meant to specify an exact time at which the notification will be sent. In some cases, the gateway might require more than the specified delay time to generate and send notifications. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', args: { name: "string", }, @@ -863,20 +871,28 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-type", + description: + "A value that specifies the type of server-side encryption that the file share will use for the data that it stores in Amazon S3. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true", + args: { + name: "string", + }, + }, { name: "--kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--no-kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--kms-key", description: - "The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional", + "Optional. 
The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value must be set if KMSEncrypted is true, or if EncryptionType is SseKms or DsseKms", args: { name: "string", }, @@ -946,12 +962,12 @@ const completionSpec: Fig.Spec = { { name: "--smbacl-enabled", description: - "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the Storage Gateway User Guide. Valid Values: true | false", + "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Windows ACLs to limit SMB file share access in the Amazon S3 File Gateway User Guide. Valid Values: true | false", }, { name: "--no-smbacl-enabled", description: - "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the Storage Gateway User Guide. Valid Values: true | false", + "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Windows ACLs to limit SMB file share access in the Amazon S3 File Gateway User Guide. Valid Values: true | false", }, { name: "--access-based-enumeration", @@ -1037,7 +1053,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-policy", description: - 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', + 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. This setting is not meant to specify an exact time at which the notification will be sent. In some cases, the gateway might require more than the specified delay time to generate and send notifications. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. 
{}', args: { name: "string", }, @@ -4388,7 +4404,7 @@ const completionSpec: Fig.Spec = { { name: "--software-update-preferences", description: - "A set of variables indicating the software update preferences for the gateway. Includes AutomaticUpdatePolicy field with the following inputs: ALL_VERSIONS - Enables regular gateway maintenance updates. EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates", + "A set of variables indicating the software update preferences for the gateway. Includes AutomaticUpdatePolicy field with the following inputs: ALL_VERSIONS - Enables regular gateway maintenance updates. EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates. The gateway will still receive emergency version updates on rare occasions if necessary to remedy highly critical security or durability issues. You will be notified before an emergency version update is applied. These updates are applied during your gateway's scheduled maintenance window", args: { name: "structure", }, @@ -4425,20 +4441,28 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-type", + description: + "A value that specifies the type of server-side encryption that the file share will use for the data that it stores in Amazon S3. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true", + args: { + name: "string", + }, + }, { name: "--kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--no-kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. 
Valid Values: true | false", }, { name: "--kms-key", description: - "The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional", + "Optional. The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value must be set if KMSEncrypted is true, or if EncryptionType is SseKms or DsseKms", args: { name: "string", }, @@ -4530,7 +4554,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-policy", description: - 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', + 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. This setting is not meant to specify an exact time at which the notification will be sent. In some cases, the gateway might require more than the specified delay time to generate and send notifications. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', args: { name: "string", }, @@ -4575,20 +4599,28 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-type", + description: + "A value that specifies the type of server-side encryption that the file share will use for the data that it stores in Amazon S3. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true", + args: { + name: "string", + }, + }, { name: "--kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. 
We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--no-kms-encrypted", description: - "Set to true to use Amazon S3 server-side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional. Valid Values: true | false", + "Optional. Set to true to use Amazon S3 server-side encryption with your own KMS key (SSE-KMS), or false to use a key managed by Amazon S3 (SSE-S3). To use dual-layer encryption (DSSE-KMS), set the EncryptionType parameter instead. We recommend using EncryptionType instead of KMSEncrypted to set the file share encryption method. You do not need to provide values for both parameters. If values for both parameters exist in the same request, then the specified encryption methods must not conflict. For example, if EncryptionType is SseS3, then KMSEncrypted must be false. If EncryptionType is SseKms or DsseKms, then KMSEncrypted must be true. Valid Values: true | false", }, { name: "--kms-key", description: - "The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional", + "Optional. The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value must be set if KMSEncrypted is true, or if EncryptionType is SseKms or DsseKms", args: { name: "string", }, @@ -4642,12 +4674,12 @@ const completionSpec: Fig.Spec = { { name: "--smbacl-enabled", description: - "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the Storage Gateway User Guide. Valid Values: true | false", + "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Windows ACLs to limit SMB file share access in the Amazon S3 File Gateway User Guide. Valid Values: true | false", }, { name: "--no-smbacl-enabled", description: - "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the Storage Gateway User Guide. Valid Values: true | false", + "Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions. For more information, see Using Windows ACLs to limit SMB file share access in the Amazon S3 File Gateway User Guide. Valid Values: true | false", }, { name: "--access-based-enumeration", @@ -4717,7 +4749,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-policy", description: - 'The notification policy of the file share. 
SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', + 'The notification policy of the file share. SettlingTimeInSeconds controls the number of seconds to wait after the last point in time a client wrote to a file before generating an ObjectUploaded notification. Because clients can make many small writes to files, it\'s best to set this parameter for as long as possible to avoid generating multiple notifications for the same file in a small time period. SettlingTimeInSeconds has no effect on the timing of the object uploading to Amazon S3, only the timing of the notification. This setting is not meant to specify an exact time at which the notification will be sent. In some cases, the gateway might require more than the specified delay time to generate and send notifications. The following example sets NotificationPolicy on with SettlingTimeInSeconds set to 60. {\\"Upload\\": {\\"SettlingTimeInSeconds\\": 60}} The following example sets NotificationPolicy off. {}', args: { name: "string", }, diff --git a/src/aws/supplychain.ts b/src/aws/supplychain.ts index ff47142ce40a..274448f328cc 100644 --- a/src/aws/supplychain.ts +++ b/src/aws/supplychain.ts @@ -89,7 +89,7 @@ const completionSpec: Fig.Spec = { { name: "send-data-integration-event", description: - "Send transactional data events with real-time data for analysis or monitoring", + "Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion", options: [ { name: "--instance-id", @@ -107,7 +107,8 @@ const completionSpec: Fig.Spec = { }, { name: "--data", - description: "The data payload of the event", + description: + "The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain", args: { name: "string", }, diff --git a/src/aws/synthetics.ts b/src/aws/synthetics.ts index d0a0ed6825ac..39ef6c39c932 100644 --- a/src/aws/synthetics.ts +++ b/src/aws/synthetics.ts @@ -128,10 +128,18 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--resources-to-replicate-tags", + description: + "To have the tags that you apply to this canary also be applied to the Lambda function that the canary uses, specify this parameter with the value lambda-function. If you specify this parameter and don't specify any tags in the Tags parameter, the canary creation fails", + args: { + name: "list", + }, + }, { name: "--tags", description: - "A list of key-value pairs to associate with the canary. You can associate as many as 50 tags with a canary. Tags can help you organize and categorize your resources. 
You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values", + "A list of key-value pairs to associate with the canary. You can associate as many as 50 tags with a canary. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values. To have the tags that you apply to this canary also be applied to the Lambda function that the canary uses, specify this parameter with the value lambda-function", args: { name: "map", }, @@ -206,7 +214,7 @@ const completionSpec: Fig.Spec = { { name: "delete-canary", description: - "Permanently deletes the specified canary. If you specify DeleteLambda to true, CloudWatch Synthetics also deletes the Lambda functions and layers that are used by the canary. Other resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to use again, you should also delete the following: The CloudWatch alarms created for this canary. These alarms have a name of Synthetics-SharpDrop-Alarm-MyCanaryName . Amazon S3 objects and buckets, such as the canary's artifact location. IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-MyCanaryName . CloudWatch Logs log groups created for the canary. These logs groups have the name /aws/lambda/cwsyn-MyCanaryName . Before you delete a canary, you might want to use GetCanary to display the information about this canary. Make note of the information returned by this operation so that you can delete these resources after you delete the canary", + "Permanently deletes the specified canary. If you specify DeleteLambda to true, CloudWatch Synthetics also deletes the Lambda functions and layers that are used by the canary. Other resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to use again, you should also delete the following: The CloudWatch alarms created for this canary. These alarms have a name of Synthetics-Alarm-first-198-characters-of-canary-name-canaryId-alarm number Amazon S3 objects and buckets, such as the canary's artifact location. IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-First-21-Characters-of-CanaryName CloudWatch Logs log groups created for the canary. These logs groups have the name /aws/lambda/cwsyn-First-21-Characters-of-CanaryName Before you delete a canary, you might want to use GetCanary to display the information about this canary. Make note of the information returned by this operation so that you can delete these resources after you delete the canary", options: [ { name: "--name", @@ -293,7 +301,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "Specify this parameter to limit how many canaries are returned each time you use the DescribeCanaries operation. If you omit this parameter, the default of 100 is used", + "Specify this parameter to limit how many canaries are returned each time you use the DescribeCanaries operation. 
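// Editor's note: a hypothetical request fragment (not part of the generated spec) showing how
// the --resources-to-replicate-tags and --tags parameters described above are meant to interact:
// tags applied to the canary are copied to its Lambda function only when the list contains
// "lambda-function". Field names follow the CLI parameter names and the canary name is invented
// for illustration.
const createCanaryTagging = {
  Name: "example-canary",
  ResourcesToReplicateTags: ["lambda-function"],
  Tags: { Team: "observability", Environment: "prod" },
};
// Per the description above, supplying ResourcesToReplicateTags without any Tags
// would cause canary creation to fail.
console.log(JSON.stringify(createCanaryTagging, null, 2));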
If you omit this parameter, the default of 20 is used", args: { name: "integer", }, diff --git a/src/aws/timestream-influxdb.ts b/src/aws/timestream-influxdb.ts index 8e3d6cd43003..4be1bc949c09 100644 --- a/src/aws/timestream-influxdb.ts +++ b/src/aws/timestream-influxdb.ts @@ -552,6 +552,22 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--db-instance-type", + description: + "The Timestream for InfluxDB DB instance type to run InfluxDB on", + args: { + name: "string", + }, + }, + { + name: "--deployment-type", + description: + "Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/wafv2.ts b/src/aws/wafv2.ts index 9a9cc14b5fac..566921361152 100644 --- a/src/aws/wafv2.ts +++ b/src/aws/wafv2.ts @@ -2296,7 +2296,7 @@ const completionSpec: Fig.Spec = { { name: "put-permission-policy", description: - "Attaches an IAM policy to the specified resource. Use this to share a rule group across accounts. You must be the owner of the rule group to perform this operation. This action is subject to the following restrictions: You can attach only one policy with each PutPermissionPolicy request. The ARN in the request must be a valid WAF RuleGroup ARN and the rule group must exist in the same Region. The user making the request must be the owner of the rule group", + "Use this to share a rule group with other accounts. This action attaches an IAM policy to the specified resource. You must be the owner of the rule group to perform this operation. This action is subject to the following restrictions: You can attach only one policy with each PutPermissionPolicy request. The ARN in the request must be a valid WAF RuleGroup ARN and the rule group must exist in the same Region. The user making the request must be the owner of the rule group. If a rule group has been shared with your account, you can access it through the call GetRuleGroup, and you can reference it in CreateWebACL and UpdateWebACL. Rule groups that are shared with you don't appear in your WAF console rule groups listing", options: [ { name: "--resource-arn", diff --git a/src/aws/workspaces.ts b/src/aws/workspaces.ts index 58d20cfa2641..75e43eba1182 100644 --- a/src/aws/workspaces.ts +++ b/src/aws/workspaces.ts @@ -1876,6 +1876,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--filters", + description: "The filter condition for the WorkSpaces", + args: { + name: "list", + }, + }, { name: "--cli-input-json", description: @@ -3340,6 +3347,21 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--idc-instance-arn", + description: + "The Amazon Resource Name (ARN) of the identity center instance", + args: { + name: "string", + }, + }, + { + name: "--microsoft-entra-config", + description: "The details about Microsoft Entra config", + args: { + name: "structure", + }, + }, { name: "--workspace-type", description: @@ -3484,7 +3506,7 @@ const completionSpec: Fig.Spec = { { name: "start-workspaces", description: - "Starts the specified WorkSpaces. You cannot start a WorkSpace unless it has a running mode of AutoStop and a state of STOPPED", + "Starts the specified WorkSpaces. 
You cannot start a WorkSpace unless it has a running mode of AutoStop or Manual and a state of STOPPED", options: [ { name: "--start-workspace-requests", @@ -3547,7 +3569,7 @@ const completionSpec: Fig.Spec = { { name: "stop-workspaces", description: - "Stops the specified WorkSpaces. You cannot stop a WorkSpace unless it has a running mode of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR", + "Stops the specified WorkSpaces. You cannot stop a WorkSpace unless it has a running mode of AutoStop or Manual and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR", options: [ { name: "--stop-workspace-requests",
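// Editor's note: a minimal sketch (not part of the generated spec) of the eligibility rules
// stated in the start-workspaces and stop-workspaces descriptions above. The string values and
// function names are assumptions for illustration only.
type WorkSpaceRunningMode = "AutoStop" | "AlwaysOn" | "Manual";

function canStartWorkSpace(runningMode: WorkSpaceRunningMode, state: string): boolean {
  // start-workspaces: running mode must be AutoStop or Manual, and the state must be STOPPED.
  return (runningMode === "AutoStop" || runningMode === "Manual") && state === "STOPPED";
}

function canStopWorkSpace(runningMode: WorkSpaceRunningMode, state: string): boolean {
  // stop-workspaces: running mode AutoStop or Manual, and one of the listed states.
  return (
    (runningMode === "AutoStop" || runningMode === "Manual") &&
    ["AVAILABLE", "IMPAIRED", "UNHEALTHY", "ERROR"].includes(state)
  );
}

console.log(canStartWorkSpace("AutoStop", "STOPPED"));  // true
console.log(canStopWorkSpace("AlwaysOn", "AVAILABLE")); // false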