From 80ffa82a5ec81d8631ddaa2596d02836feca3f8e Mon Sep 17 00:00:00 2001 From: figbot <82115609+withfig-bot@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:34:04 -0800 Subject: [PATCH] feat: update spec --- src/aws.ts | 30 +- src/aws/amp.ts | 83 +++- src/aws/appconfig.ts | 14 +- src/aws/appsync.ts | 522 +++++++++++++++++++- src/aws/autoscaling.ts | 2 +- src/aws/batch.ts | 10 +- src/aws/bedrock-agent.ts | 4 +- src/aws/bedrock.ts | 110 ++++- src/aws/cleanrooms.ts | 77 +-- src/aws/codebuild.ts | 24 + src/aws/connect.ts | 66 ++- src/aws/datasync.ts | 60 ++- src/aws/docdb-elastic.ts | 151 ++++++ src/aws/ecs.ts | 147 +++++- src/aws/elbv2.ts | 51 +- src/aws/geo-maps.ts | 303 ++++++++++++ src/aws/geo-places.ts | 703 ++++++++++++++++++++++++++ src/aws/geo-routes.ts | 713 +++++++++++++++++++++++++++ src/aws/glue.ts | 325 ++++++++++++ src/aws/keyspaces.ts | 209 +++++++- src/aws/logs.ts | 34 +- src/aws/mediapackagev2.ts | 375 ++++++++++++++ src/aws/mwaa.ts | 74 ++- src/aws/network-firewall.ts | 2 +- src/aws/opensearch.ts | 280 +++++++++++ src/aws/opensearchserverless.ts | 16 + src/aws/payment-cryptography-data.ts | 26 +- src/aws/rds.ts | 94 +++- src/aws/redshift-data.ts | 74 ++- src/aws/redshift-serverless.ts | 16 + src/aws/redshift.ts | 13 +- src/aws/route53.ts | 6 +- src/aws/sagemaker.ts | 81 ++- src/aws/socialmessaging.ts | 1 - src/aws/storagegateway.ts | 16 +- src/aws/supplychain.ts | 47 +- src/aws/taxsettings.ts | 130 ++++- src/aws/workmail.ts | 373 +++++++++++++- 38 files changed, 5049 insertions(+), 213 deletions(-) create mode 100644 src/aws/geo-maps.ts create mode 100644 src/aws/geo-places.ts create mode 100644 src/aws/geo-routes.ts diff --git a/src/aws.ts b/src/aws.ts index 37bf4cbc043f..561ae40fa3ff 100644 --- a/src/aws.ts +++ b/src/aws.ts @@ -903,6 +903,24 @@ const completionSpec: Fig.Spec = { "Amazon GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. 
Built on Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand. About Amazon GameLift solutions Get more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide. Amazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking. Managed hosting with Realtime Servers -- With Amazon GameLift Realtime Servers, you can quickly configure and set up ready-to-go game servers for your game. Realtime Servers provides a game server framework with core Amazon GameLift infrastructure already built in. Then use the full range of Amazon GameLift managed hosting features, including FlexMatch, for your game. Amazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services. Amazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. Use FlexMatch as integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution. About this API Reference This reference guide describes the low-level service API for Amazon GameLift. 
With each topic in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI reference. Useful links: Amazon GameLift API operations listed by tasks Amazon GameLift tools and resources", loadSpec: "aws/gamelift", }, + { + name: "geo-maps", + description: + "Integrate high-quality base map data into your applications using MapLibre. Capabilities include: Access to comprehensive base map data, allowing you to tailor the map display to your specific needs. Multiple pre-designed map styles suited for various application types, such as navigation, logistics, or data visualization. Generation of static map images for scenarios where interactive maps aren't suitable, such as: Embedding in emails or documents Displaying in low-bandwidth environments Creating printable maps Enhancing application performance by reducing client-side rendering", + loadSpec: "aws/geo-maps", + }, + { + name: "geo-places", + description: + "The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. Key features include: Forward and reverse geocoding for addresses and coordinates Comprehensive place searches with detailed information, including: Business names and addresses Contact information Hours of operation POI (Points of Interest) categories Food types for restaurants Chain affiliation for relevant businesses Global data coverage with a wide range of POI categories Regular data updates to ensure accuracy and relevance", + loadSpec: "aws/geo-places", + }, + { + name: "geo-routes", + description: + "With the Amazon Location Routes API you can calculate routes and estimate travel time based on up-to-date road network and live traffic information. Calculate optimal travel routes and estimate travel times using up-to-date road network and traffic data. 
Key features include: Point-to-point routing with estimated travel time, distance, and turn-by-turn directions Multi-point route optimization to minimize travel time or distance Route matrices for efficient multi-destination planning Isoline calculations to determine reachable areas within specified time or distance thresholds Map-matching to align GPS traces with the road network", + loadSpec: "aws/geo-routes", + }, { name: "glacier", description: @@ -1483,7 +1501,7 @@ const completionSpec: Fig.Spec = { { name: "mwaa", description: - "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. CreateCliToken CreateWebLoginToken Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", + "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. 
CreateCliToken CreateWebLoginToken InvokeRestApi Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", loadSpec: "aws/mwaa", }, { @@ -1507,7 +1525,7 @@ const completionSpec: Fig.Spec = { { name: "network-firewall", description: - "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 6.0.9. For information about Suricata, see the Suricata website. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. 
The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", + "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. 
To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. 
In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", loadSpec: "aws/network-firewall", }, { @@ -1522,12 +1540,6 @@ const completionSpec: Fig.Spec = { "Amazon CloudWatch Network Monitor is an Amazon Web Services active network monitoring service that identifies if a network issues exists within the Amazon Web Services network or your own company network. Within Network Monitor you'll choose the source VPCs and subnets from the Amazon Web Services network in which you operate and then you'll choose the destination IP addresses from your on-premises network. From these sources and destinations, Network Monitor creates a monitor containing all the possible source and destination combinations, each of which is called a probe, within a single monitor. These probes then monitor network traffic to help you identify where network issues might be affecting your traffic. Before you begin, ensure the Amazon Web Services CLI is configured in the Amazon Web Services Account where you will create the Network Monitor resource. Network Monitor doesn\u2019t support creation on cross-account resources, but you can create a Network Monitor in any subnet belonging to a VPC owned by your Account. 
For more information, see Using Amazon CloudWatch Network Monitor in the Amazon CloudWatch User Guide", loadSpec: "aws/networkmonitor", }, - { - name: "nimble", - description: - "Welcome to the Amazon Nimble Studio API reference. This API reference provides methods, schema, resources, parameters, and more to help you get the most out of Nimble Studio. Nimble Studio is a virtual studio that empowers visual effects, animation, and interactive content teams to create content securely within a scalable, private cloud service", - loadSpec: "aws/nimble", - }, { name: "oam", description: @@ -2115,7 +2127,7 @@ const completionSpec: Fig.Spec = { { name: "storagegateway", description: - "Storage Gateway Service Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. 
When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", + "Storage Gateway Service Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. 
Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. 
For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", loadSpec: "aws/storagegateway", }, { diff --git a/src/aws/amp.ts b/src/aws/amp.ts index 23f33d62ce2a..ab4c003f0ed1 100644 --- a/src/aws/amp.ts +++ b/src/aws/amp.ts @@ -67,7 +67,7 @@ const completionSpec: Fig.Spec = { { name: "--log-group-arn", description: - "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API", + "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation", args: { name: "string", }, @@ -165,12 +165,12 @@ const completionSpec: Fig.Spec = { { name: "create-scraper", description: - "The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace. If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide. You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one. The scrapeConfiguration parameter contains the base64-encoded version of the YAML configuration file. For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide", + "The CreateScraper operation creates a scraper to collect metrics. 
A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more. An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide. The scrapeConfiguration parameter contains the base-64 encoded YAML configuration for the scraper. For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide", options: [ { name: "--alias", description: - "(optional) a name to associate with the scraper. This is for your use, and does not need to be unique", + "(optional) An alias to associate with the scraper. This is for your use, and does not need to be unique", args: { name: "string", }, @@ -836,12 +836,12 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are workspaces and rule groups namespaces", + "The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are scrapers, workspaces, and rule groups namespaces", options: [ { name: "--resource-arn", description: - "The ARN of the resource to list tages for. 
Must be a workspace or rule groups namespace resource", + "The ARN of the resource to list tages for. Must be a workspace, scraper, or rule groups namespace resource", args: { name: "string", }, @@ -1044,12 +1044,11 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag", + "The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. To remove a tag, use UntagResource", options: [ { name: "--resource-arn", - description: - "The ARN of the workspace or rule groups namespace to apply tags to", + description: "The ARN of the resource to apply tags to", args: { name: "string", }, @@ -1057,7 +1056,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "The list of tag keys and values to associate with the resource. Keys may not begin with aws:", + "The list of tag keys and values to associate with the resource. Keys must not begin with aws:", args: { name: "map", }, @@ -1084,11 +1083,11 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Removes the specified tags from an Amazon Managed Service for Prometheus resource. 
The only resources that can be tagged are workspaces and rule groups namespaces", + "Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces", options: [ { name: "--resource-arn", - description: "The ARN of the workspace or rule groups namespace", + description: "The ARN of the resource from which to remove a tag", args: { name: "string", }, @@ -1167,6 +1166,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-scraper", + description: + "Updates an existing scraper. You can't use this function to update the source from which the scraper is collecting metrics. To change the source, delete the scraper and create a new one", + options: [ + { + name: "--alias", + description: "The new alias of the scraper", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The new Amazon Managed Service for Prometheus workspace to send metrics to", + args: { + name: "structure", + }, + }, + { + name: "--scrape-configuration", + description: + "Contains the base-64 encoded YAML configuration for the scraper. For more information about configuring a scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide", + args: { + name: "structure", + }, + }, + { + name: "--scraper-id", + description: "The ID of the scraper to update", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-workspace-alias", description: "Updates the alias of an existing workspace", diff --git a/src/aws/appconfig.ts b/src/aws/appconfig.ts index 0f06aff409c2..14bced444574 100644 --- a/src/aws/appconfig.ts +++ b/src/aws/appconfig.ts @@ -852,7 +852,7 @@ const completionSpec: Fig.Spec = { { name: "--client-configuration-version", description: - "The configuration version returned in the most recent GetConfiguration response. AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don\u2019t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration. To avoid excess charges, we recommend you use the StartConfigurationSession and GetLatestConfiguration APIs, which track the client configuration version on your behalf. If you choose to continue using GetConfiguration, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. The value to use for ClientConfigurationVersion comes from the ConfigurationVersion attribute returned by GetConfiguration when there is new or updated data, and should be saved for subsequent calls to GetConfiguration. 
For more information about working with configurations, see Retrieving the Configuration in the AppConfig User Guide", + "The configuration version returned in the most recent GetConfiguration response. AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don\u2019t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration. To avoid excess charges, we recommend you use the StartConfigurationSession and GetLatestConfiguration APIs, which track the client configuration version on your behalf. If you choose to continue using GetConfiguration, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. The value to use for ClientConfigurationVersion comes from the ConfigurationVersion attribute returned by GetConfiguration when there is new or updated data, and should be saved for subsequent calls to GetConfiguration. For more information about working with configurations, see Retrieving feature flags and configuration data in AppConfig in the AppConfig User Guide", args: { name: "string", }, @@ -1847,7 +1847,7 @@ const completionSpec: Fig.Spec = { { name: "stop-deployment", description: - "Stops a deployment. This API action works only on deployments that have a status of DEPLOYING. This action moves the deployment to a status of ROLLED_BACK", + "Stops a deployment. This API action works only on deployments that have a status of DEPLOYING, unless an AllowRevert parameter is supplied. If the AllowRevert parameter is supplied, the status of an in-progress deployment will be ROLLED_BACK. The status of a completed deployment will be REVERTED. 
AppConfig only allows a revert within 72 hours of deployment completion", options: [ { name: "--application-id", @@ -1870,6 +1870,16 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--allow-revert", + description: + "A Boolean that enables AppConfig to rollback a COMPLETED deployment to the previous configuration version. This action moves the deployment to a status of REVERTED", + }, + { + name: "--no-allow-revert", + description: + "A Boolean that enables AppConfig to rollback a COMPLETED deployment to the previous configuration version. This action moves the deployment to a status of REVERTED", + }, { name: "--cli-input-json", description: diff --git a/src/aws/appsync.ts b/src/aws/appsync.ts index 1384272163a2..2fa244ae6397 100644 --- a/src/aws/appsync.ts +++ b/src/aws/appsync.ts @@ -149,6 +149,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-api", + description: + "Creates an Api object. Use this operation to create an AppSync API with your preferred configuration, such as an Event API that provides real-time message publishing and message subscriptions over WebSockets", + options: [ + { + name: "--name", + description: "The name for the Api", + args: { + name: "string", + }, + }, + { + name: "--owner-contact", + description: "The owner contact information for the Api", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "A map with keys of TagKey objects and values of TagValue objects", + args: { + name: "map", + }, + }, + { + name: "--event-config", + description: + "The Event API configuration. This includes the default authorization configuration for connecting, publishing, and subscribing to an Event API", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-api-cache", description: "Creates a cache for the GraphQL API", @@ -277,6 +331,76 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-channel-namespace", + description: "Creates a ChannelNamespace for an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "The name of the ChannelNamespace. This name must be unique within the Api", + args: { + name: "string", + }, + }, + { + name: "--subscribe-auth-modes", + description: + "The authorization mode to use for subscribing to messages on the channel namespace. This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--publish-auth-modes", + description: + "The authorization mode to use for publishing messages on the channel namespace. 
This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--code-handlers", + description: + "The event handler functions that run custom business logic to process published events and subscribe requests", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "A map with keys of TagKey objects and values of TagValue objects", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-data-source", description: "Creates a DataSource object", @@ -619,25 +743,25 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--visibility", + name: "--api-type", description: - "Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. 
This value cannot be changed once the API has been created", + "The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED)", args: { name: "string", }, }, { - name: "--api-type", + name: "--merged-api-execution-role-arn", description: - "The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED)", + "The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically", args: { name: "string", }, }, { - name: "--merged-api-execution-role-arn", + name: "--visibility", description: - "The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically", + "Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created", args: { name: "string", }, @@ -876,6 +1000,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-api", + description: "Deletes an Api object", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-api-cache", description: "Deletes an ApiCache object", @@ -943,6 +1097,43 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-channel-namespace", + description: "Deletes a ChannelNamespace", + options: [ + { + name: "--api-id", + description: "The ID of the Api associated with the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-source", description: "Deletes a DataSource object", @@ -1394,6 +1585,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-api", + description: "Retrieves an Api object", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-api-association", description: "Retrieves an ApiAssociation object", @@ -1454,6 +1675,43 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-channel-namespace", + description: "Retrieves the channel namespace for a specified Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-data-source", description: "Retrieves a DataSource object", @@ -1507,12 +1765,12 @@ const completionSpec: Fig.Spec = { { name: "--include-models-sdl", description: - "A boolean flag that determines whether SDL should be generated for introspected types or not. If set to true, each model will contain an sdl property that contains the SDL for that type. 
The SDL only contains the type data and no additional metadata or directives", + "A boolean flag that determines whether SDL should be generated for introspected types. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", }, { name: "--no-include-models-sdl", description: - "A boolean flag that determines whether SDL should be generated for introspected types or not. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", + "A boolean flag that determines whether SDL should be generated for introspected types. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", }, { name: "--next-token", @@ -1944,6 +2202,141 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-apis", + description: + "Lists the APIs in your AppSync account. ListApis returns only the high level API details. For more detailed information about an API, use GetApi", + options: [ + { + name: "--next-token", + description: + "An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that you want the request to return", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-channel-namespaces", + description: + "Lists the channel namespaces for a specified Api. ListChannelNamespaces returns only high level details for the channel namespace. To retrieve code handlers, use GetChannelNamespace", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that you want the request to return", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-sources", description: "Lists the data sources for a given API", @@ -2867,6 +3260,58 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-api", + description: "Updates an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the Api", + args: { + name: "string", + }, + }, + { + name: "--owner-contact", + description: "The owner contact information for the Api", + args: { + name: "string", + }, + }, + { + name: "--event-config", + description: + "The new event configuration. 
This includes the default authorization configuration for connecting, publishing, and subscribing to an Event API", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-api-cache", description: "Updates the cache for the GraphQL API", @@ -2982,6 +3427,67 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-channel-namespace", + description: "Updates a ChannelNamespace associated with an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--subscribe-auth-modes", + description: + "The authorization mode to use for subscribing to messages on the channel namespace. This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--publish-auth-modes", + description: + "The authorization mode to use for publishing messages on the channel namespace. 
This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--code-handlers", + description: + "The event handler functions that run custom business logic to process published events and subscribe requests", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-data-source", description: "Updates a DataSource object", diff --git a/src/aws/autoscaling.ts b/src/aws/autoscaling.ts index 9175b71f2ebc..6b5b1098dac9 100644 --- a/src/aws/autoscaling.ts +++ b/src/aws/autoscaling.ts @@ -3437,7 +3437,7 @@ const completionSpec: Fig.Spec = { { name: "--preferences", description: - "Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. 
You can also choose to enable additional features, such as the following: Auto rollback Checkpoints CloudWatch alarms Skip matching", + "Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following: Auto rollback Checkpoints CloudWatch alarms Skip matching Bake time", args: { name: "structure", }, diff --git a/src/aws/batch.ts b/src/aws/batch.ts index a2f3e8ef35bc..b00c8c89317c 100644 --- a/src/aws/batch.ts +++ b/src/aws/batch.ts @@ -18,7 +18,7 @@ const completionSpec: Fig.Spec = { { name: "--reason", description: - "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs", + "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs. This parameter has as limit of 1024 characters", args: { name: "string", }, @@ -161,7 +161,7 @@ const completionSpec: Fig.Spec = { { name: "--scheduling-policy-arn", description: - "The Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue uses a fair share scheduling policy. If this parameter isn't specified, the job queue uses a first in, first out (FIFO) scheduling policy. After a job queue is created, you can replace but can't remove the fair share scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . 
An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy", + "The Amazon Resource Name (ARN) of the fair share scheduling policy. Job queues that don't have a scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a scheduling policy, it can be replaced but can't be removed. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy. A job queue without a scheduling policy is scheduled as a FIFO job queue and can't have a scheduling policy added. Jobs queues with a scheduling policy can have a maximum of 500 active fair share identifiers. When the limit has been reached, submissions of any jobs that add a new fair share identifier fail", args: { name: "string", }, @@ -193,7 +193,7 @@ const completionSpec: Fig.Spec = { { name: "--job-state-time-limit-actions", description: - "The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed", + "The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)", args: { name: "list", }, @@ -1250,7 +1250,7 @@ const completionSpec: Fig.Spec = { { name: "--reason", description: - "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs", + "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs. 
This parameter has as limit of 1024 characters", args: { name: "string", }, @@ -1437,7 +1437,7 @@ const completionSpec: Fig.Spec = { { name: "--job-state-time-limit-actions", description: - "The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed", + "The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)", args: { name: "list", }, diff --git a/src/aws/bedrock-agent.ts b/src/aws/bedrock-agent.ts index 75d4afe7a957..156603ba8824 100644 --- a/src/aws/bedrock-agent.ts +++ b/src/aws/bedrock-agent.ts @@ -113,7 +113,7 @@ const completionSpec: Fig.Spec = { { name: "--foundation-model", description: - "The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create", + "The identifier for the model that you want to be used for orchestration by the agent you create. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. 
Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console", args: { name: "string", }, @@ -2967,7 +2967,7 @@ const completionSpec: Fig.Spec = { { name: "--foundation-model", description: - "Specifies a new foundation model to be used for orchestration by the agent", + "The identifier for the model that you want to be used for orchestration by the agent you create. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the imported model. 
You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console", args: { name: "string", }, diff --git a/src/aws/bedrock.ts b/src/aws/bedrock.ts index 19cb1a68bff9..05658ad12f2b 100644 --- a/src/aws/bedrock.ts +++ b/src/aws/bedrock.ts @@ -290,6 +290,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-inference-profile", + description: + "Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", + options: [ + { + name: "--inference-profile-name", + description: "A name for the inference profile", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A description for the inference profile", + args: { + name: "string", + }, + }, + { + name: "--client-request-token", + description: + "A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency", + args: { + name: "string", + }, + }, + { + name: "--model-source", + description: + "The foundation model or system-defined inference profile that the inference profile will track metrics and costs for", + args: { + name: "structure", + }, + }, + { + name: "--tags", + description: + "An array of objects, each of which contains a tag and its value. 
For more information, see Tagging resources in the Amazon Bedrock User Guide", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-model-copy-job", description: @@ -834,6 +896,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-inference-profile", + description: + "Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", + options: [ + { + name: "--inference-profile-identifier", + description: + "The Amazon Resource Name (ARN) or ID of the application inference profile to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-model-invocation-logging-configuration", description: "Delete the invocation logging", @@ -1057,11 +1151,12 @@ const completionSpec: Fig.Spec = { { name: "get-inference-profile", description: - "Gets information about an inference profile. For more information, see the Amazon Bedrock User Guide", + "Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", options: [ { name: "--inference-profile-identifier", - description: "The unique identifier of the inference profile", + description: + "The ID or Amazon Resource Name (ARN) of the inference profile", args: { name: "string", }, @@ -1733,7 +1828,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-inference-profiles", - description: "Returns a list of inference profiles that you can use", + description: + "Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", options: [ { name: "--max-results", @@ -1751,6 +1847,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--type-equals", + description: + "Filters for inference profiles that match the type you specify. 
SYSTEM_DEFINED \u2013 The inference profile is defined by Amazon Bedrock. You can route inference requests across regions with these inference profiles. APPLICATION \u2013 The inference profile was created by a user. This type of inference profile can track metrics and costs when invoking the model in it. The inference profile may route requests to one or multiple regions", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/cleanrooms.ts b/src/aws/cleanrooms.ts index dc603351c15b..257e91f5702d 100644 --- a/src/aws/cleanrooms.ts +++ b/src/aws/cleanrooms.ts @@ -267,6 +267,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--analytics-engine", + description: "The analytics engine", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2014,7 +2021,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2022,7 +2029,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2085,7 +2092,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2093,7 +2100,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2157,7 +2164,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2165,7 +2172,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2300,7 +2307,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2308,7 +2315,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. 
Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2378,7 +2385,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2386,7 +2393,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2442,7 +2449,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2450,7 +2457,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2521,7 +2528,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2529,7 +2536,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2592,7 +2599,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2600,7 +2607,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2655,7 +2662,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2663,7 +2670,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2868,7 +2875,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2876,7 +2883,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2932,7 +2939,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2940,7 +2947,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3012,7 +3019,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3020,7 +3027,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3091,7 +3098,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3099,7 +3106,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3168,7 +3175,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3176,7 +3183,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service can return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3238,8 +3245,7 @@ const completionSpec: Fig.Spec = { }, { name: "--schema-type", - description: - "If present, filter schemas by schema type. 
The only valid schema type is currently `TABLE`", + description: "If present, filter schemas by schema type", args: { name: "string", }, @@ -3247,7 +3253,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3255,7 +3261,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3447,6 +3453,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--compute-configuration", + description: "The compute configuration for the protected query", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/codebuild.ts b/src/aws/codebuild.ts index 43c4d71a8a23..4a938ef9ba0a 100644 --- a/src/aws/codebuild.ts +++ b/src/aws/codebuild.ts @@ -495,6 +495,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--auto-retry-limit", + description: + "The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -2501,6 +2509,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--auto-retry-limit-override", + description: + "The maximum number of additional automatic retries after a failed build. 
For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -3135,6 +3151,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--auto-retry-limit", + description: + "The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/connect.ts b/src/aws/connect.ts index fa3e1802c355..5a23e316191e 100644 --- a/src/aws/connect.ts +++ b/src/aws/connect.ts @@ -587,12 +587,13 @@ const completionSpec: Fig.Spec = { }, { name: "associate-traffic-distribution-group-user", - description: "Associates an agent with a traffic distribution group", + description: + "Associates an agent with a traffic distribution group. This API can be called only in the Region where the traffic distribution group is created", options: [ { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -2268,7 +2269,7 @@ const completionSpec: Fig.Spec = { { name: "create-traffic-distribution-group", description: - "Creates a traffic distribution group given an Amazon Connect instance that has been replicated. The SignInConfig distribution is available only on a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). 
If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned. For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide", + "Creates a traffic distribution group given an Amazon Connect instance that has been replicated. The SignInConfig distribution is available only on a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned. For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide", options: [ { name: "--name", @@ -3418,7 +3419,7 @@ const completionSpec: Fig.Spec = { { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -5129,12 +5130,13 @@ const completionSpec: Fig.Spec = { }, { name: "disassociate-traffic-distribution-group-user", - description: "Disassociates an agent from a traffic distribution group", + description: + "Disassociates an agent from a traffic distribution group. This API can be called only in the Region where the traffic distribution group is created", options: [ { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. 
The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -5681,7 +5683,7 @@ const completionSpec: Fig.Spec = { { name: "get-metric-data-v2", description: - "Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide", + "Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. It does not support agent queues. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide", options: [ { name: "--resource-arn", @@ -5734,7 +5736,7 @@ const completionSpec: Fig.Spec = { { name: "--metrics", description: - 'The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. 
ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in 
seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. 
PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', + 'The metrics to retrieve. 
Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in metric-level filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in 
seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. 
PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in metric-level filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', args: { name: "list", }, @@ -11717,6 
+11719,54 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-screen-sharing", + description: + "Starts screen sharing for a contact. For more information about screen sharing, see Set up in-app, web, video calling, and screen sharing capabilities in the Amazon Connect Administrator Guide", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs", + args: { + name: "string", + }, + }, + { + name: "--instance-id", + description: + "The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance", + args: { + name: "string", + }, + }, + { + name: "--contact-id", + description: + "The identifier of the contact in this instance of Amazon Connect", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "start-task-contact", description: diff --git a/src/aws/datasync.ts b/src/aws/datasync.ts index 9b0738f9f3f7..d5efde3ad3fd 100644 --- a/src/aws/datasync.ts +++ b/src/aws/datasync.ts @@ -126,12 +126,12 @@ const completionSpec: Fig.Spec = { { name: "create-agent", description: - "Activates an DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account. If you haven't deployed an agent yet, see the following topics to learn more: Agent requirements Create an agent If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent", + "Activates an DataSync agent that you deploy in your storage environment. The activation process associates the agent with your Amazon Web Services account. If you haven't deployed an agent yet, see Do I need a DataSync agent?", options: [ { name: "--activation-key", description: - "Specifies your DataSync agent's activation key. If you don't have an activation key, see Activate your agent", + "Specifies your DataSync agent's activation key. If you don't have an activation key, see Activating your agent", args: { name: "string", }, @@ -139,7 +139,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-name", description: - "Specifies a name for your agent. You can see this name in the DataSync console", + "Specifies a name for your agent. We recommend specifying a name that you can remember", args: { name: "string", }, @@ -155,7 +155,7 @@ const completionSpec: Fig.Spec = { { name: "--vpc-endpoint-id", description: - "Specifies the ID of the VPC endpoint that you want your agent to connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. 
The VPC endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync)", + "Specifies the ID of the VPC service endpoint that you're using. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. The VPC service endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync)", args: { name: "string", }, @@ -163,7 +163,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-arns", description: - "Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. You can only specify one ARN", + "Specifies the ARN of the subnet where your VPC service endpoint is located. You can only specify one ARN", args: { name: "list", }, @@ -171,7 +171,7 @@ const completionSpec: Fig.Spec = { { name: "--security-group-arns", description: - "Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN", + "Specifies the Amazon Resource Name (ARN) of the security group that allows traffic between your agent and VPC service endpoint. You can only specify one ARN", args: { name: "list", }, @@ -291,14 +291,14 @@ const completionSpec: Fig.Spec = { { name: "--subdirectory", description: - "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories. You must specify a value with forward slashes (for example, /path/to/folder)", + "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system. 
By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder)", args: { name: "string", }, }, { name: "--efs-filesystem-arn", - description: "Specifies the ARN for the Amazon EFS file system", + description: "Specifies the ARN for your Amazon EFS file system", args: { name: "string", }, @@ -306,7 +306,7 @@ const completionSpec: Fig.Spec = { { name: "--ec2-config", description: - "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system", + "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's mount targets", args: { name: "structure", }, @@ -322,7 +322,7 @@ const completionSpec: Fig.Spec = { { name: "--access-point-arn", description: - "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system", + "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted file systems", args: { name: "string", }, @@ -330,7 +330,7 @@ const completionSpec: Fig.Spec = { { name: "--file-system-access-role-arn", description: - "Specifies an Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system", + "Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for file system access", args: { name: "string", }, @@ -338,7 +338,7 @@ const completionSpec: Fig.Spec = { { name: "--in-transit-encryption", description: - "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system. 
If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2", + "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2", args: { name: "string", }, @@ -570,7 +570,7 @@ const completionSpec: Fig.Spec = { { name: "--security-group-arns", description: - "Specifies the ARNs of the security groups that provide access to your file system's preferred subnet. If you choose a security group that doesn't allow connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. Choose a different security group that can communicate with the mount target's security group", + "Specifies the ARNs of the Amazon EC2 security groups that provide access to your file system's preferred subnet. The security groups that you specify must be able to communicate with your file system's security groups. For information about configuring security groups for file system access, see the Amazon FSx for Windows File Server User Guide . If you choose a security group that doesn't allow connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. 
Choose a different security group that can communicate with the mount target's security group", args: { name: "list", }, @@ -722,7 +722,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster", + "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster", args: { name: "list", }, @@ -778,7 +778,7 @@ const completionSpec: Fig.Spec = { { name: "--on-prem-config", description: - "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server. You can specify more than one agent. For more information, see Using multiple agents for transfers", + "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server. You can specify more than one agent. For more information, see Using multiple DataSync agents", args: { name: "structure", }, @@ -882,7 +882,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location", + "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system", args: { name: "list", }, @@ -1042,7 +1042,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the DataSync agent (or agents) which you want to connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", + "Specifies the DataSync agent (or agents) that can connect to your SMB file server. 
You specify an agent by using its Amazon Resource Name (ARN)", args: { name: "list", }, @@ -1105,7 +1105,7 @@ const completionSpec: Fig.Spec = { { name: "--cloud-watch-log-group-arn", description: - "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task", + "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task. For Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named /aws/datasync", args: { name: "string", }, @@ -1152,7 +1152,7 @@ const completionSpec: Fig.Spec = { { name: "--includes", description: - "Specifies include filters define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see Specifying what DataSync transfers by using filters", + "Specifies include filters that define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see Specifying what DataSync transfers by using filters", args: { name: "list", }, @@ -1173,6 +1173,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--task-mode", + description: + "Specifies one of the following task modes for your data transfer: ENHANCED - Transfer virtually unlimited numbers of objects with enhanced metrics, more detailed logs, and higher performance than Basic mode. Currently available for transfers between Amazon S3 locations. To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission. BASIC (default) - Transfer files or objects between Amazon Web Services storage and on-premises, edge, or other cloud storage. DataSync quotas apply. 
For more information, see Understanding task mode differences", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1944,7 +1952,7 @@ const completionSpec: Fig.Spec = { { name: "describe-task-execution", description: - "Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing transfer or check the results of the transfer", + "Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing data transfer or check the results of the transfer. Some DescribeTaskExecution response elements are only relevant to a specific task mode. For information, see Understanding task mode differences and Understanding data transfer performance metrics", options: [ { name: "--task-execution-arn", @@ -2593,7 +2601,7 @@ const completionSpec: Fig.Spec = { { name: "start-task-execution", description: - "Starts an DataSync transfer task. For each task, you can only run one task execution at a time. There are several phases to a task execution. For more information, see Task execution statuses. If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin", + "Starts an DataSync transfer task. For each task, you can only run one task execution at a time. There are several steps to a task execution. For more information, see Task execution statuses. 
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin", options: [ { name: "--task-arn", @@ -3040,7 +3048,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "The ARNs of the agents that are used to connect to the HDFS cluster", + "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster", args: { name: "list", }, @@ -3088,7 +3096,7 @@ const completionSpec: Fig.Spec = { { name: "--on-prem-config", description: - "The DataSync agents that are connecting to a Network File System (NFS) location", + "The DataSync agents that can connect to your Network File System (NFS) file server", args: { name: "structure", }, @@ -3176,7 +3184,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location", + "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system", args: { name: "list", }, @@ -3256,7 +3264,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the DataSync agent (or agents) which you want to connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", + "Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", args: { name: "list", }, @@ -3406,7 +3414,7 @@ const completionSpec: Fig.Spec = { { name: "--cloud-watch-log-group-arn", description: - "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task", + "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task. For Enhanced mode tasks, you must use /aws/datasync as your log group name. 
For example: arn:aws:logs:us-east-1:111222333444:log-group:/aws/datasync:* For more information, see Monitoring data transfers with CloudWatch Logs", args: { name: "string", }, diff --git a/src/aws/docdb-elastic.ts b/src/aws/docdb-elastic.ts index d953a5e9100a..203acb55db5c 100644 --- a/src/aws/docdb-elastic.ts +++ b/src/aws/docdb-elastic.ts @@ -3,6 +3,62 @@ const completionSpec: Fig.Spec = { description: "Amazon DocumentDB elastic clusters Amazon DocumentDB elastic-clusters support workloads with millions of reads/writes per second and petabytes of storage capacity. Amazon DocumentDB elastic clusters also simplify how developers interact with Amazon DocumentDB elastic-clusters by eliminating the need to choose, manage or upgrade instances. Amazon DocumentDB elastic-clusters were created to: provide a solution for customers looking for a database that provides virtually limitless scale with rich query capabilities and MongoDB API compatibility. give customers higher connection limits, and to reduce downtime from patching. continue investing in a cloud-native, elastic, and class leading architecture for JSON workloads", subcommands: [ + { + name: "apply-pending-maintenance-action", + description: + "The type of pending maintenance action to be applied to the resource", + options: [ + { + name: "--apply-action", + description: + "The pending maintenance action to apply to the resource. Valid actions are: ENGINE_UPDATE ENGINE_UPGRADE SECURITY_UPDATE OS_UPDATE MASTER_USER_PASSWORD_UPDATE", + args: { + name: "string", + }, + }, + { + name: "--apply-on", + description: + "A specific date to apply the pending maintenance action. Required if opt-in-type is APPLY_ON. Format: yyyy/MM/dd HH:mm-yyyy/MM/dd HH:mm", + args: { + name: "string", + }, + }, + { + name: "--opt-in-type", + description: + "A value that specifies the type of opt-in request, or undoes an opt-in request. 
An opt-in request of type IMMEDIATE can't be undone", + args: { + name: "string", + }, + }, + { + name: "--resource-arn", + description: + "The Amazon DocumentDB Amazon Resource Name (ARN) of the resource to which the pending maintenance action applies", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "copy-cluster-snapshot", description: "Copies a snapshot of an elastic cluster", @@ -378,6 +434,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-pending-maintenance-action", + description: "Retrieves all maintenance actions that are pending", + options: [ + { + name: "--resource-arn", + description: + "Retrieves pending maintenance actions for a specific Amazon Resource Name (ARN)", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-cluster-snapshots", description: @@ -521,6 +608,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-pending-maintenance-actions", + description: + "Retrieves a list of all maintenance actions that are pending", + options: [ + { + name: "--max-results", + description: + "The maximum number of results to include in the response. If more records exist than the specified maxResults value, a pagination token (marker) is included in the response so that the remaining results can be retrieved", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by maxResults", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tags-for-resource", description: "Lists all tags on a elastic cluster resource", diff --git a/src/aws/ecs.ts b/src/aws/ecs.ts index a869c69ea688..dd1a41508cbf 100644 --- a/src/aws/ecs.ts +++ b/src/aws/ecs.ts @@ -513,7 +513,7 @@ const completionSpec: Fig.Spec = { { name: "create-service", description: - "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. 
There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. 
For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. 
When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide", + "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). 
This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. 
If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide", options: [ { name: "--cluster", @@ -613,7 +613,7 @@ const completionSpec: Fig.Spec = { { name: "--deployment-configuration", description: - "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks", + "Optional deployment parameters that control how many tasks run during the deployment and the failure detection methods", args: { name: "structure", isVariadic: true, @@ -1427,6 +1427,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-service-deployments", + description: + "Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For more information, see Amazon ECS service deployments", + options: [ + { + name: "--service-deployment-arns", + description: + "The ARN of the service deployment. You can specify a maximum of 20 ARNs", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-service-revisions", + description: + "Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon ECS resources (for example, task definition) and the environment resources (for example, load balancers, subnets, and security groups). For more information, see Amazon ECS service revisions. You can't describe a service revision that was created before October 25, 2024", + options: [ + { + name: "--service-revision-arns", + description: + "The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to get the ARNs", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally",
+        args: {
+          name: "string",
+        },
+      },
+      {
+        name: "--generate-cli-skeleton",
+        description:
+          "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command",
+        args: {
+          name: "string",
+          suggestions: ["input", "output"],
+        },
+      },
+    ],
+  },
   {
     name: "describe-services",
     description: "Describes the specified services running in your cluster",
@@ -2142,6 +2206,77 @@ const completionSpec: Fig.Spec = {
     },
   ],
 },
+    {
+      name: "list-service-deployments",
+      description:
+        "This operation lists all the service deployments that meet the specified filter criteria. A service deployment happens when you release a software update for the service. You route traffic from the running service revisions to the new service revision and control the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions",
+      options: [
+        {
+          name: "--service",
+          description: "The ARN or name of the service",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--cluster",
+          description:
+            "The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. If you don't specify a cluster, default is used",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--status",
+          description:
+            "An optional filter you can use to narrow the results. 
If you do not specify a status, then all status values are included in the result", + args: { + name: "list", + }, + }, + { + name: "--created-at", + description: + "An optional filter you can use to narrow the results by the service creation date. If you do not specify a value, the result includes all services created before the current time. The format is yyyy-MM-dd HH:mm:ss.SSSSSS", + args: { + name: "structure", + }, + }, + { + name: "--next-token", + description: + "The nextToken value returned from a ListServiceDeployments request indicating that more results are available to fulfill the request and further calls are needed. If you provided maxResults, it's possible the number of results is fewer than maxResults", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of service deployment results that ListServiceDeployments returned in paginated output. When this parameter is used, ListServiceDeployments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListServiceDeployments request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListServiceDeployments returns up to 20 results and a nextToken value if applicable", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-services", description: @@ -2973,7 +3108,7 @@ const completionSpec: Fig.Spec = { { name: "--network-mode", description: - "The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. 
If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used", + "The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. 
For more information, see Network settings in the Docker run reference", args: { name: "string", suggestions: ["bridge", "host", "awsvpc", "none"], @@ -3045,7 +3180,7 @@ const completionSpec: Fig.Spec = { { name: "--pid-mode", description: - "The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate", + "The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. 
If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate", args: { name: "string", suggestions: ["host", "task"], @@ -3054,7 +3189,7 @@ const completionSpec: Fig.Spec = { { name: "--ipc-mode", description: - "The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate", + "The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. 
If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. 
This parameter is not supported for Windows containers or tasks run on Fargate", args: { name: "string", suggestions: ["host", "task", "none"], @@ -4135,7 +4270,7 @@ const completionSpec: Fig.Spec = { { name: "--deployment-configuration", description: - "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks", + "Optional deployment parameters that control how many tasks run during the deployment and the failure detection methods", args: { name: "structure", suggestions: [ diff --git a/src/aws/elbv2.ts b/src/aws/elbv2.ts index 95e343cd9c1f..658cd3f40f73 100644 --- a/src/aws/elbv2.ts +++ b/src/aws/elbv2.ts @@ -133,7 +133,7 @@ const completionSpec: Fig.Spec = { { name: "--protocol", description: - "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer", + "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer", args: { name: "string", }, @@ -141,7 +141,7 @@ const completionSpec: Fig.Spec = { { name: "--port", description: - "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer", + "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer", args: { name: "integer", }, @@ -226,7 +226,7 @@ const completionSpec: Fig.Spec = { { name: "--subnets", description: - "The IDs of the subnets. 
You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -234,7 +234,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-mappings", description: - "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. 
You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets", + "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets", args: { name: "list", }, @@ -250,7 +250,7 @@ const completionSpec: Fig.Spec = { { name: "--scheme", description: - "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet. The nodes of an internal load balancer have only private IP addresses. 
The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer. The default is an Internet-facing load balancer. You cannot specify a scheme for a Gateway Load Balancer", + "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet. The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer. The default is an Internet-facing load balancer. You can't specify a scheme for a Gateway Load Balancer", args: { name: "string", }, @@ -272,7 +272,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. Internal load balancers must use ipv4. 
[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). [Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", args: { name: "string", }, @@ -285,6 +285,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--enable-prefix-for-ipv6-source-nat", + description: + "[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -428,12 +436,12 @@ const completionSpec: Fig.Spec = { { name: "--health-check-enabled", description: - "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled", + "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and can't be disabled", }, { name: "--no-health-check-enabled", description: - "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled", + "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. 
If the target type is instance, ip, or alb, health checks are always enabled and can't be disabled", }, { name: "--health-check-path", @@ -500,8 +508,7 @@ const completionSpec: Fig.Spec = { }, { name: "--ip-address-type", - description: - "The type of IP address used for this target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4", + description: "The IP address type. The default value is ipv4", args: { name: "string", }, @@ -532,7 +539,7 @@ const completionSpec: Fig.Spec = { { name: "--name", description: - "The name of the trust store. This name must be unique per region and cannot be changed after creation", + "The name of the trust store. This name must be unique per region and can't be changed after creation", args: { name: "string", }, @@ -1728,7 +1735,7 @@ const completionSpec: Fig.Spec = { { name: "--port", description: - "The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer", + "The port for connections from clients to the load balancer. You can't specify a port for a Gateway Load Balancer", args: { name: "integer", }, @@ -1736,7 +1743,7 @@ const completionSpec: Fig.Spec = { { name: "--protocol", description: - "The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can\u2019t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer", + "The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can\u2019t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. 
You can't specify a protocol for a Gateway Load Balancer", args: { name: "string", }, @@ -2116,7 +2123,7 @@ const completionSpec: Fig.Spec = { { name: "register-targets", description: - "Registers the specified targets with the specified target group. If the target is an EC2 instance, it must be in the running state when you register it. By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports. With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address", + "Registers the specified targets with the specified target group. If the target is an EC2 instance, it must be in the running state when you register it. By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports. With a Network Load Balancer, you can't register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address", options: [ { name: "--target-group-arn", @@ -2282,7 +2289,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. 
The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. Internal load balancers must use ipv4. [Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). Application Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", args: { name: "string", }, @@ -2398,7 +2405,7 @@ const completionSpec: Fig.Spec = { { name: "--subnets", description: - "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. 
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -2406,7 +2413,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-mappings", description: - "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the public subnets. You can specify only one subnet per Availability Zone. 
You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -2414,7 +2421,15 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. [Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). 
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", + args: { + name: "string", + }, + }, + { + name: "--enable-prefix-for-ipv6-source-nat", + description: + "[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off", args: { name: "string", }, diff --git a/src/aws/geo-maps.ts b/src/aws/geo-maps.ts new file mode 100644 index 000000000000..a0bb57923198 --- /dev/null +++ b/src/aws/geo-maps.ts @@ -0,0 +1,303 @@ +const completionSpec: Fig.Spec = { + name: "geo-maps", + description: + "Integrate high-quality base map data into your applications using MapLibre. Capabilities include: Access to comprehensive base map data, allowing you to tailor the map display to your specific needs. Multiple pre-designed map styles suited for various application types, such as navigation, logistics, or data visualization. Generation of static map images for scenarios where interactive maps aren't suitable, such as: Embedding in emails or documents Displaying in low-bandwidth environments Creating printable maps Enhancing application performance by reducing client-side rendering", + subcommands: [ + { + name: "get-glyphs", + description: "Returns the map's glyphs", + options: [ + { + name: "--font-stack", + description: + "Name of the FontStack to retrieve. Example: Amazon Ember Bold,Noto Sans Bold. 
The supported font stacks are as follows: Amazon Ember Bold Amazon Ember Bold Italic Amazon Ember Bold,Noto Sans Bold Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold Amazon Ember Condensed RC BdItalic Amazon Ember Condensed RC Bold Amazon Ember Condensed RC Bold Italic Amazon Ember Condensed RC Bold,Noto Sans Bold Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold Amazon Ember Condensed RC Light Amazon Ember Condensed RC Light Italic Amazon Ember Condensed RC LtItalic Amazon Ember Condensed RC Regular Amazon Ember Condensed RC Regular Italic Amazon Ember Condensed RC Regular,Noto Sans Regular Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular Amazon Ember Condensed RC RgItalic Amazon Ember Condensed RC ThItalic Amazon Ember Condensed RC Thin Amazon Ember Condensed RC Thin Italic Amazon Ember Heavy Amazon Ember Heavy Italic Amazon Ember Light Amazon Ember Light Italic Amazon Ember Medium Amazon Ember Medium Italic Amazon Ember Medium,Noto Sans Medium Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium Amazon Ember Regular Amazon Ember Regular Italic Amazon Ember Regular Italic,Noto Sans Italic Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular Amazon Ember Regular,Noto Sans Regular Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular Amazon Ember Thin Amazon Ember Thin Italic AmazonEmberCdRC_Bd AmazonEmberCdRC_BdIt AmazonEmberCdRC_Lt AmazonEmberCdRC_LtIt AmazonEmberCdRC_Rg AmazonEmberCdRC_RgIt AmazonEmberCdRC_Th AmazonEmberCdRC_ThIt AmazonEmber_Bd AmazonEmber_BdIt AmazonEmber_He AmazonEmber_HeIt AmazonEmber_Lt AmazonEmber_LtIt AmazonEmber_Md AmazonEmber_MdIt AmazonEmber_Rg AmazonEmber_RgIt AmazonEmber_Th AmazonEmber_ThIt Noto Sans Black Noto Sans Black Italic Noto Sans Bold Noto Sans Bold Italic Noto Sans Extra Bold Noto Sans Extra Bold Italic Noto Sans Extra Light Noto Sans Extra Light Italic Noto Sans Italic Noto Sans Light Noto Sans Light Italic Noto Sans 
Medium Noto Sans Medium Italic Noto Sans Regular Noto Sans Semi Bold Noto Sans Semi Bold Italic Noto Sans Thin Noto Sans Thin Italic NotoSans-Bold NotoSans-Italic NotoSans-Medium NotoSans-Regular Open Sans Regular,Arial Unicode MS Regular", + args: { + name: "string", + }, + }, + { + name: "--font-unicode-range", + description: + "A Unicode range of characters to download glyphs for. This must be aligned to multiples of 256. Example: 0-255.pdf", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-sprites", + description: "Returns the map's sprites", + options: [ + { + name: "--file-name", + description: + "Sprites API: The name of the sprite \ufb01le to retrieve, following pattern sprites(@2x)?\\.(png|json). Example: sprites.png", + args: { + name: "string", + }, + }, + { + name: "--style", + description: + "Style specifies the desired map style for the Sprites APIs", + args: { + name: "string", + }, + }, + { + name: "--color-scheme", + description: + "Sets color tone for map such as dark and light for specific map styles. It applies to only vector map styles such as Standard and Monochrome. Example: Light Default value: Light Valid values for ColorScheme are case sensitive", + args: { + name: "string", + }, + }, + { + name: "--variant", + description: + "Optimizes map styles for specific use case or industry. You can choose allowed variant only with Standard map style. Example: Default Valid values for Variant are case sensitive", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-static-map", + description: + "Provides high-quality static map images with customizable options. You can modify the map's appearance and overlay additional information. 
It's an ideal solution for applications requiring tailored static map snapshots", + options: [ + { + name: "--bounding-box", + description: + "Takes in two pairs of coordinates, [Lon, Lat], denoting south-westerly and north-easterly edges of the image. The underlying area becomes the view of the image. Example: -123.17075,49.26959,-123.08125,49.31429", + args: { + name: "string", + }, + }, + { + name: "--bounded-positions", + description: + "Takes in two or more pair of coordinates, [Lon, Lat], with each coordinate separated by a comma. The API will generate an image to encompass all of the provided coordinates. Cannot be used with Zoom and or Radius Example: 97.170451,78.039098,99.045536,27.176178", + args: { + name: "string", + }, + }, + { + name: "--center", + description: + "Takes in a pair of coordinates, [Lon, Lat], which becomes the center point of the image. This parameter requires that either zoom or radius is set. Cannot be used with Zoom and or Radius Example: 49.295,-123.108", + args: { + name: "string", + }, + }, + { + name: "--compact-overlay", + description: + "Takes in a string to draw geometries on the image. The input is a comma separated format as follows format: [Lon, Lat] Example: line:-122.407653,37.798557,-122.413291,37.802443;color=%23DD0000;width=7;outline-color=#00DD00;outline-width=5yd|point:-122.40572,37.80004;label=Fog Hill Market;size=large;text-color=%23DD0000;color=#EE4B2B Currently it supports the following geometry types: point, line and polygon. It does not support multiPoint , multiLine and multiPolgyon", + args: { + name: "string", + }, + }, + { + name: "--geo-json-overlay", + description: + 'Takes in a string to draw geometries on the image. The input is a valid GeoJSON collection object. 
Example: {"type":"FeatureCollection","features": [{"type":"Feature","geometry":{"type":"MultiPoint","coordinates": [[-90.076345,51.504107],[-0.074451,51.506892]]},"properties": {"color":"#00DD00"}}]}', + args: { + name: "string", + }, + }, + { + name: "--height", + description: "Specifies the height of the map image", + args: { + name: "integer", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--padding", + description: + "Applies additional space (in pixels) around overlay feature to prevent them from being cut or obscured. Value for max and min is determined by: Min: 1 Max: min(height, width)/4 Example: 100", + args: { + name: "integer", + }, + }, + { + name: "--radius", + description: + "Used with center parameter, it specifies the zoom of the image where you can control it on a granular level. Takes in any value >= 1. Example: 1500 Cannot be used with Zoom. Unit: Meters", + args: { + name: "long", + }, + }, + { + name: "--file-name", + description: + "The map scaling parameter to size the image, icons, and labels. It follows the pattern of ^map(@2x)?$. Example: map, map@2x", + args: { + name: "string", + }, + }, + { + name: "--scale-bar-unit", + description: + "Displays a scale on the bottom right of the map image with the unit specified in the input. Example: KilometersMiles, Miles, Kilometers, MilesKilometers", + args: { + name: "string", + }, + }, + { + name: "--style", + description: + "Style specifies the desired map style for the Style APIs", + args: { + name: "string", + }, + }, + { + name: "--width", + description: "Specifies the width of the map image", + args: { + name: "integer", + }, + }, + { + name: "--zoom", + description: + "Specifies the zoom level of the map image. 
Cannot be used with Radius", + args: { + name: "float", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-style-descriptor", + description: "Returns information about the style", + options: [ + { + name: "--style", + description: "Style specifies the desired map style", + args: { + name: "string", + }, + }, + { + name: "--color-scheme", + description: + "Sets color tone for map such as dark and light for specific map styles. It applies to only vector map styles such as Standard and Monochrome. Example: Light Default value: Light Valid values for ColorScheme are case sensitive", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "Specifies the political view using ISO 3166-2 or ISO 3166-3 country code format. The following political views are currently supported: ARG: Argentina's view on the Southern Patagonian Ice Field and Tierra Del Fuego, including the Falkland Islands, South Georgia, and South Sandwich Islands EGY: Egypt's view on Bir Tawil IND: India's view on Gilgit-Baltistan KEN: Kenya's view on the Ilemi Triangle MAR: Morocco's view on Western Sahara PAK: Pakistan's view on Jammu and Kashmir and the Junagadh Area RUS: Russia's view on Crimea SDN: Sudan's view on the Halaib Triangle SRB: Serbia's view on Kosovo, Vukovar, and Sarengrad Islands SUR: Suriname's view on the Courantyne Headwaters and Lawa Headwaters SYR: Syria's view on the Golan Heights TUR: Turkey's view on Cyprus and Northern Cyprus TZA: Tanzania's view on Lake Malawi URY: Uruguay's view on Rincon de Artigas VNM: Vietnam's view on the Paracel Islands and Spratly Islands", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. 
Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-tile", + description: + "Returns a tile. Map tiles are used by clients to render a map. they're addressed using a grid arrangement with an X coordinate, Y coordinate, and Z (zoom) level", + options: [ + { + name: "--tileset", + description: + "Specifies the desired tile set. Valid Values: raster.satellite | vector.basemap", + args: { + name: "string", + }, + }, + { + name: "--z", + description: "The zoom value for the map tile", + args: { + name: "string", + }, + }, + { + name: "--x", + description: + "The X axis value for the map tile. Must be between 0 and 19", + args: { + name: "string", + }, + }, + { + name: "--y", + description: "The Y axis value for the map tile", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/geo-places.ts b/src/aws/geo-places.ts new file mode 100644 index 000000000000..00b7ebdcc78a --- /dev/null +++ b/src/aws/geo-places.ts @@ -0,0 +1,703 @@ +const completionSpec: Fig.Spec = { + name: "geo-places", + description: + "The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. 
Key features include: Forward and reverse geocoding for addresses and coordinates Comprehensive place searches with detailed information, including: Business names and addresses Contact information Hours of operation POI (Points of Interest) categories Food types for restaurants Chain affiliation for relevant businesses Global data coverage with a wide range of POI categories Regular data updates to ensure accuracy and relevance", + subcommands: [ + { + name: "autocomplete", + description: + "The autocomplete operation speeds up and increases the accuracy of entering addresses by providing a list of address candidates matching a partially entered address. Results are sorted from most to least matching. Filtering and biasing can be used to increase the relevance of the results if additional search context is known", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position in longitude and latitude that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format. The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--postal-code-mode", + description: + "The PostalCodeMode affects how postal code results are returned. 
If a postal code spans multiple localities and this value is empty, partial district or locality information may be returned under a single postal code result entry. If it's populated with the value cityLookup, all cities in that postal code are returned", + args: { + name: "string", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "geocode", + description: + "The Geocode action allows you to obtain coordinates, addresses, and other information about places", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--query-components", + description: + "A structured free text query allows you to search for places by the name or text representation of specific properties of the place", + args: { + name: "structure", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format. 
The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-place", + description: + "Finds a place by its unique ID. A PlaceId is returned by other place operations", + options: [ + { + name: "--place-id", + description: + "The PlaceId of the place you wish to receive the information for", + args: { + name: "string", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters such as time zone that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. 
Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "reverse-geocode", + description: + "The ReverseGeocode operation allows you to retrieve addresses and place information from coordinates", + options: [ + { + name: "--query-position", + description: + "The position, in [lng, lat] for which you are querying nearby results for. 
Results closer to the position will be ranked higher than results further away from the position", + args: { + name: "list", + }, + }, + { + name: "--query-radius", + description: + "The maximum distance in meters from the QueryPosition from which a result will be returned", + args: { + name: "long", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "search-nearby", + description: "Search nearby a specified location", + options: [ + { + name: "--query-position", + description: + "The position, in [lng, lat] for which you are querying nearby results for. 
Results closer to the position will be ranked higher than results further away from the position", + args: { + name: "list", + }, + }, + { + name: "--query-radius", + description: + "The maximum distance in meters from the QueryPosition from which a result will be returned", + args: { + name: "long", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. 
Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "search-text", + description: + "Use the SearchText operation to search for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--query-id", + description: "The query Id", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. 
Stored in [lng, lat] and in the WGS84 format. The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "suggest", + description: + "The Suggest operation finds addresses or place candidates based on incomplete or misspelled queries. You then select the best query to submit based on the returned results", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--max-query-refinements", + description: + "Maximum number of query terms to be returned for use with a search text query", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format. 
The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/geo-routes.ts b/src/aws/geo-routes.ts new file mode 100644 index 000000000000..f77f590634ad --- /dev/null +++ b/src/aws/geo-routes.ts @@ -0,0 +1,713 @@ +const completionSpec: Fig.Spec = { + name: "geo-routes", + description: + "With the Amazon Location Routes API you can calculate routes and estimate travel time based on up-to-date road network and live traffic information. Calculate optimal travel routes and estimate travel times using up-to-date road network and traffic data. Key features include: Point-to-point routing with estimated travel time, distance, and turn-by-turn directions Multi-point route optimization to minimize travel time or distance Route matrices for efficient multi-destination planning Isoline calculations to determine reachable areas within specified time or distance thresholds Map-matching to align GPS traces with the road network", + subcommands: [ + { + name: "calculate-isolines", + description: + "Use the CalculateIsolines action to find service areas that can be reached in a given threshold of time, distance", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--arrival-time", + description: + "Time of arrival at the destination. 
Time format: YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from the origin. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route. In the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--isoline-geometry-format", + description: + "The format of the returned IsolineGeometry. Default Value:FlexiblePolyline", + args: { + name: "string", + }, + }, + { + name: "--isoline-granularity", + description: "Defines the granularity of the returned Isoline", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-isoline-for", + description: + "Specifies the optimization criteria for when calculating an isoline. AccurateCalculation generates an isoline of higher granularity that is more precise. 
FastCalculation generates an isoline faster by reducing the granularity, and in turn the quality of the isoline. BalancedCalculation generates an isoline by balancing between quality and performance. Default Value: BalancedCalculation", + args: { + name: "string", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--thresholds", + description: + "Threshold to be used for the isoline calculation. Up to 3 thresholds per provided type can be requested", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. The mode Scooter also applies to motorcycles, set to Scooter when wanted to calculate options for motorcycles. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "calculate-route-matrix", + description: + "Calculates route matrix containing the results for all pairs of Origins to Destinations. Each row corresponds to one entry in Origins. Each entry in the row corresponds to the route from that entry in Origins to an entry in Destinations positions", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from thr origin. 
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destinations", + description: "List of destinations for the route", + args: { + name: "list", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origins", + description: "The position in longitude and latitude for the origin", + args: { + name: "list", + }, + }, + { + name: "--routing-boundary", + description: + "Boundary within which the matrix is to be calculated. All data, origins and destinations outside the boundary are considered invalid. When request routing boundary was set as AutoCircle, the response routing boundary will return Circle derived from the AutoCircle settings", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "calculate-routes", + description: + "Calculates a route given the following required parameters: Origin and Destination", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--arrival-time", + description: + "Time of arrival at the destination. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from thr origin. 
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route. In the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--driver", + description: "Driver related options", + args: { + name: "structure", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--instructions-measurement-system", + description: + "Measurement system to be used for instructions within steps in the response", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--languages", + description: + "List of languages for instructions within steps in the response. Instructions in the requested language are returned only if they are available", + args: { + name: "list", + }, + }, + { + name: "--leg-additional-features", + description: + "A list of optional additional parameters such as timezone that can be requested for each result. Elevation: Retrieves the elevation information for each location. Incidents: Provides information on traffic incidents along the route. PassThroughWaypoints: Indicates waypoints that are passed through without stopping. Summary: Returns a summary of the route, including distance and duration. Tolls: Supplies toll cost information along the route. TravelStepInstructions: Provides step-by-step instructions for travel along the route. 
TruckRoadTypes: Returns information about road types suitable for trucks. TypicalDuration: Gives typical travel duration based on historical data. Zones: Specifies the time zone information for each waypoint", + args: { + name: "list", + }, + }, + { + name: "--leg-geometry-format", + description: + "Specifies the format of the geometry returned for each leg of the route. You can choose between two different geometry encoding formats. FlexiblePolyline: A compact and precise encoding format for the leg geometry. For more information on the format, see the GitHub repository for FlexiblePolyline . Simple: A less compact encoding, which is easier to decode but may be less precise and result in larger payloads", + args: { + name: "string", + }, + }, + { + name: "--max-alternatives", + description: + "Maximum number of alternative routes to be provided in the response, if available", + args: { + name: "integer", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--span-additional-features", + description: + "A list of optional features such as SpeedLimit that can be requested for a Span. A span is a section of a Leg for which the requested features have the same values", + args: { + name: "list", + }, + }, + { + name: "--tolls", + description: "Toll related options", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. 
Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--travel-step-type", + description: + "Type of step returned by the response. Default provides basic steps intended for web based applications. TurnByTurn provides detailed instructions with more granularity intended for a turn based navigation system", + args: { + name: "string", + }, + }, + { + name: "--waypoints", + description: "List of waypoints between the Origin and Destination", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "optimize-waypoints", + description: + "Calculates the optimal order to travel between a set of waypoints to minimize either the travel time or the distance travelled during the journey, based on road network restrictions and the traffic pattern data", + options: [ + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. 
If an avoidance can't be satisfied for a particular case, this setting is ignored", + args: { + name: "structure", + }, + }, + { + name: "--departure-time", + description: + "Departure time from the waypoint. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route in the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--driver", + description: "Driver related options", + args: { + name: "structure", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-sequencing-for", + description: + "Specifies the optimization criteria for the calculated sequence. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic-related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. 
Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--waypoints", + description: "List of waypoints between the Origin and Destination", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "snap-to-roads", + description: + "The SnapToRoads action matches GPS trace to roads most likely traveled on", + options: [ + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--snapped-geometry-format", + description: + "Chooses what the returned SnappedGeometry format should be. Default Value: FlexiblePolyline", + args: { + name: "string", + }, + }, + { + name: "--snap-radius", + description: + "The radius around the provided tracepoint that is considered for snapping. 
Unit: meters Default value: 300", + args: { + name: "long", + }, + }, + { + name: "--trace-points", + description: + "List of trace points to be snapped onto the road network", + args: { + name: "list", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/glue.ts b/src/aws/glue.ts index 18588bd079c3..70ec3c4c0909 100644 --- a/src/aws/glue.ts +++ b/src/aws/glue.ts @@ -1055,6 +1055,96 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-column-statistics-task-settings", + description: "Creates settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to generate column statistics", + args: { + name: "string", + }, + }, + { + name: "--role", + description: "The role used for running the column statistics", + args: { + name: "string", + }, + }, + { + name: "--schedule", + description: + "A schedule for running the column statistics, specified in CRON syntax", + args: { + name: "string", + }, + }, + { + name: "--column-name-list", + description: "A list of column names for which to run statistics", + args: { + name: "list", + }, + }, + { + name: "--sample-size", + description: "The percentage of data to sample", + args: { + name: "double", + }, + }, + { + name: "--catalog-id", + description: + "The ID of the Data Catalog in which the database resides", + args: { + name: "string", + }, + }, + { + name: "--security-configuration", + description: + "Name of the security configuration that is used to encrypt CloudWatch logs", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "A map of tags", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-connection", description: @@ -2931,6 +3021,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-column-statistics-task-settings", + description: "Deletes settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to delete column statistics", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-connection", description: "Deletes a connection from the Data Catalog", @@ -4264,6 +4392,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-column-statistics-task-settings", + description: "Gets settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to retrieve column statistics", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-connection", description: "Retrieves a connection definition from the Data Catalog", @@ -9629,6 +9795,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-column-statistics-task-run-schedule", + description: "Starts a column statistics task run schedule", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to start a column statistic task run schedule", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "start-crawler", description: @@ -10236,6 +10440,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "stop-column-statistics-task-run-schedule", + description: "Stops a column statistics task run schedule", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to stop a column statistic task run schedule", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "stop-crawler", description: "If the specified crawler is running, stops the crawl", @@ -10730,6 +10972,89 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-column-statistics-task-settings", + description: "Updates settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to generate column statistics", + args: { + name: "string", + }, + }, + { + name: "--role", + description: "The role used for running the column statistics", + args: { + name: "string", + }, + }, + { + name: "--schedule", + description: + "A schedule for running the column statistics, specified in CRON syntax", + args: { + name: "string", + }, + }, + { + name: "--column-name-list", + description: "A list of column names for which to run statistics", + args: { + name: "list", + }, + }, + { + name: "--sample-size", + description: "The percentage of data to sample", + args: { + name: "double", + }, + }, + { + name: "--catalog-id", + description: + "The ID of the Data Catalog in which the database resides", + args: { + name: "string", + }, + }, + { + name: "--security-configuration", + description: + "Name of the security configuration that is used to encrypt CloudWatch logs", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-connection", description: "Updates a connection definition in the Data Catalog", diff --git a/src/aws/keyspaces.ts b/src/aws/keyspaces.ts index 954637101322..69074d9bfba5 100644 --- a/src/aws/keyspaces.ts +++ b/src/aws/keyspaces.ts @@ -6,7 +6,7 @@ const completionSpec: Fig.Spec = { { name: "create-keyspace", description: - "The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names must be unique within each Region. CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace by using the GetKeyspace operation. For more information, see Creating keyspaces in the Amazon Keyspaces Developer Guide", + "The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names must be unique within each Region. CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace by using the GetKeyspace operation. For more information, see Create a keyspace in the Amazon Keyspaces Developer Guide", options: [ { name: "--keyspace-name", @@ -53,7 +53,7 @@ const completionSpec: Fig.Spec = { { name: "create-table", description: - "The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names must be unique. 
CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. You can monitor the creation status of the new table by using the GetTable operation, which returns the current status of the table. You can start using a table when the status is ACTIVE. For more information, see Creating tables in the Amazon Keyspaces Developer Guide", + "The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names must be unique. CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. You can monitor the creation status of the new table by using the GetTable operation, which returns the current status of the table. You can start using a table when the status is ACTIVE. For more information, see Create a table in the Amazon Keyspaces Developer Guide", options: [ { name: "--keyspace-name", @@ -177,6 +177,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-type", + description: + "The CreateType operation creates a new user-defined type in the specified keyspace. For more information, see User-defined types (UDTs) in the Amazon Keyspaces Developer Guide", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: + "The name of the user-defined type. UDT names must contain 48 characters or less, must begin with an alphabetic character, and can only contain alpha-numeric characters and underscores. Amazon Keyspaces converts upper case characters automatically into lower case characters. Alternatively, you can declare a UDT name in double quotes. When declaring a UDT name inside double quotes, Amazon Keyspaces preserves upper casing and allows special characters. 
You can also use double quotes as part of the name when you create the UDT, but you must escape each double quote character with an additional double quote character", + args: { + name: "string", + }, + }, + { + name: "--field-definitions", + description: + "The field definitions, consisting of names and types, that define this type", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-keyspace", description: @@ -246,6 +293,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-type", + description: + "The DeleteType operation deletes a user-defined type (UDT). You can only delete a type that is not used in a table or another UDT", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace of the to be deleted type", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: "The name of the type to be deleted", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-keyspace", description: @@ -280,7 +365,7 @@ const completionSpec: Fig.Spec = { { name: "get-table", description: - "Returns information about the table, including the table's name and current status, the keyspace name, configuration settings, and metadata. To read table metadata using GetTable, Select action permissions for the table and system tables are required to complete the operation", + "Returns information about the table, including the table's name and current status, the keyspace name, configuration settings, and metadata. To read table metadata using GetTable, the IAM principal needs Select action permissions for the table and the system keyspace", options: [ { name: "--keyspace-name", @@ -353,9 +438,48 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-type", + description: + "The GetType operation returns information about the type, for example the field definitions, the timestamp when the type was last modified, the level of nesting, the status, and details about if the type is used in other types and tables. 
To read keyspace metadata using GetType, the IAM principal needs Select action permissions for the system keyspace", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace that contains this type", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: + "The formatted name of the type. For example, if the name of the type was created without double quotes, Amazon Keyspaces saved the name in lower-case characters. If the name was created in double quotes, you must use double quotes to specify the type name", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-keyspaces", - description: "Returns a list of keyspaces", + description: "The ListKeyspaces operation returns a list of keyspaces", options: [ { name: "--next-token", @@ -418,7 +542,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-tables", - description: "Returns a list of tables for a specified keyspace", + description: + "The ListTables operation returns a list of tables for a specified keyspace. 
To read keyspace metadata using ListTables, the IAM principal needs Select action permissions for the system keyspace", options: [ { name: "--next-token", @@ -489,7 +614,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "Returns a list of all tags associated with the specified Amazon Keyspaces resource", + "Returns a list of all tags associated with the specified Amazon Keyspaces resource. To read keyspace metadata using ListTagsForResource, the IAM principal needs Select action permissions for the specified resource and the system keyspace", options: [ { name: "--resource-arn", @@ -558,6 +683,78 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-types", + description: + "The ListTypes operation returns a list of types for a specified keyspace. To read keyspace metadata using ListTypes, the IAM principal needs Select action permissions for the system keyspace", + options: [ + { + name: "--next-token", + description: + "The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The total number of types to return in the output. If the total number of types available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation", + args: { + name: "integer", + }, + }, + { + name: "--keyspace-name", + description: + "The name of the keyspace that contains the listed types", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "restore-table", description: diff --git a/src/aws/logs.ts b/src/aws/logs.ts index 6492cc2d2e4b..a3d6458bd8e5 100644 --- a/src/aws/logs.ts +++ b/src/aws/logs.ts @@ -85,7 +85,7 @@ const completionSpec: Fig.Spec = { { name: "create-delivery", description: - "Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. 
You can't update an existing delivery. You can only create and delete deliveries", + "Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. To update an existing delivery configuration, use UpdateDeliveryConfiguration", options: [ { name: "--delivery-source-name", @@ -106,7 +106,7 @@ const completionSpec: Fig.Spec = { { name: "--record-fields", description: - "The list of record fields to be delivered to the destination, in order. 
If the delivery\u2019s log source has mandatory fields, they must be included in this list", + "The list of record fields to be delivered to the destination, in order. If the delivery's log source has mandatory fields, they must be included in this list", args: { name: "list", }, @@ -122,7 +122,7 @@ const completionSpec: Fig.Spec = { { name: "--s3-delivery-configuration", description: - "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket", + "This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket", args: { name: "structure", }, @@ -208,7 +208,7 @@ const completionSpec: Fig.Spec = { { name: "--destination-prefix", description: - "The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs", + "The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs. The length of this parameter must comply with the S3 object key name length limits. The object key name is a sequence of Unicode characters with UTF-8 encoding, and can be up to 1,024 bytes", args: { name: "string", }, @@ -1393,7 +1393,7 @@ const completionSpec: Fig.Spec = { { name: "describe-log-groups", description: - "Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name. CloudWatch Logs doesn\u2019t support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags. 
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability", + "Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name. CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability", options: [ { name: "--account-identifiers", @@ -3349,7 +3349,7 @@ const completionSpec: Fig.Spec = { { name: "put-retention-policy", description: - "Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group. CloudWatch Logs doesn\u2019t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer. To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven\u2019t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. 
Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted. When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing", + "Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group. CloudWatch Logs doesn't immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer. To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven't been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted. When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing", options: [ { name: "--log-group-name", @@ -3554,7 +3554,7 @@ const completionSpec: Fig.Spec = { { name: "--limit", description: - "The maximum number of log events to return in the query. 
If the query string uses the fields command, only the specified fields and their values are returned. The default is 1000", + "The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 10,000", args: { name: "integer", }, @@ -3613,7 +3613,7 @@ const completionSpec: Fig.Spec = { { name: "tag-log-group", description: - "The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead. Adds or updates the specified tags for the specified log group. To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource. For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide. CloudWatch Logs doesn\u2019t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags", + "The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead. Adds or updates the specified tags for the specified log group. To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource. For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide. CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags", options: [ { name: "--log-group-name", @@ -3730,7 +3730,7 @@ const completionSpec: Fig.Spec = { { name: "untag-log-group", description: - "The UntagLogGroup operation is on the path to deprecation. 
We recommend that you use UntagResource instead. Removes the specified tags from the specified log group. To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource. CloudWatch Logs doesn\u2019t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys", + "The UntagLogGroup operation is on the path to deprecation. We recommend that you use UntagResource instead. Removes the specified tags from the specified log group. To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource. CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys", options: [ { name: "--log-group-name", @@ -3807,7 +3807,7 @@ const completionSpec: Fig.Spec = { { name: "update-anomaly", description: - "Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won\u2019t report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won\u2019t report any anomalies related to that pattern. You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation. If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters", + "Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won't report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won't report any anomalies related to that pattern. 
You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation. If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters", options: [ { name: "--anomaly-id", @@ -3849,6 +3849,16 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--baseline", + description: + "Set this to true to prevent CloudWatch Logs from displaying this behavior as an anomaly in the future. The behavior is then treated as baseline behavior. However, if similar but more severe occurrences of this behavior occur in the future, those will still be reported as anomalies. The default is false", + }, + { + name: "--no-baseline", + description: + "Set this to true to prevent CloudWatch Logs from displaying this behavior as an anomaly in the future. The behavior is then treated as baseline behavior. However, if similar but more severe occurrences of this behavior occur in the future, those will still be reported as anomalies. The default is false", + }, { name: "--cli-input-json", description: @@ -3883,7 +3893,7 @@ const completionSpec: Fig.Spec = { { name: "--record-fields", description: - "The list of record fields to be delivered to the destination, in order. If the delivery\u2019s log source has mandatory fields, they must be included in this list", + "The list of record fields to be delivered to the destination, in order. 
If the delivery's log source has mandatory fields, they must be included in this list", args: { name: "list", }, @@ -3899,7 +3909,7 @@ const completionSpec: Fig.Spec = { { name: "--s3-delivery-configuration", description: - "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket", + "This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket", args: { name: "structure", }, diff --git a/src/aws/mediapackagev2.ts b/src/aws/mediapackagev2.ts index f978607053aa..c7211ae22aff 100644 --- a/src/aws/mediapackagev2.ts +++ b/src/aws/mediapackagev2.ts @@ -3,6 +3,69 @@ const completionSpec: Fig.Spec = { description: "This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2. The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources. If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference. This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols. We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. 
We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide", subcommands: [ + { + name: "cancel-harvest-job", + description: "Cancels an in-progress harvest job", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel from which the harvest job is running", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel from which the harvest job is running", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint that the harvest job is harvesting from. This cannot be changed after the harvest job is submitted", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: + "The name of the harvest job to cancel. This name must be unique within the channel and cannot be changed after the harvest job is submitted", + args: { + name: "string", + }, + }, + { + name: "--e-tag", + description: + "The current Entity Tag (ETag) associated with the harvest job. Used for concurrency control", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-channel", description: @@ -131,6 +194,106 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-harvest-job", + description: + "Creates a new harvest job to export content from a MediaPackage v2 channel to an S3 bucket", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: "The name of the channel from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "An optional description for the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvested-manifests", + description: "A list of manifests to be harvested", + args: { + name: "structure", + }, + }, + { + name: "--schedule-configuration", + description: + "The configuration for when the harvest job should run, including start and end times", + args: { + name: "structure", + }, + }, + { + name: "--destination", + description: + "The S3 destination where the harvested content will be placed", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: + "A name for the harvest job. 
This name must be unique within the channel", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "A collection of tags associated with the harvest job", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-origin-endpoint", description: @@ -573,6 +736,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-harvest-job", + description: "Retrieves the details of a specific harvest job", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: "The name of the harvest job to retrieve", + args: { + name: "string", + }, + }, + { + name: 
"--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-origin-endpoint", description: @@ -805,6 +1022,102 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-harvest-jobs", + description: + "Retrieves a list of harvest jobs that match the specified criteria", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group to filter the harvest jobs by. If specified, only harvest jobs associated with channels in this group will be returned", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel to filter the harvest jobs by. If specified, only harvest jobs associated with this channel will be returned", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint to filter the harvest jobs by. If specified, only harvest jobs associated with this origin endpoint will be returned", + args: { + name: "string", + }, + }, + { + name: "--status", + description: + "The status to filter the harvest jobs by. 
If specified, only harvest jobs with this status will be returned", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of harvest jobs to return in a single request. If not specified, a default value will be used", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token used for pagination. Provide this value in subsequent requests to retrieve the next set of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. 
To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-origin-endpoints", description: @@ -1316,6 +1629,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "wait", + description: + "Wait until a particular condition is satisfied. Each subcommand polls an API until the listed requirement is met", + subcommands: [ + { + name: "harvest-job-finished", + description: + "Wait until JMESPath query Status returns COMPLETED when polling with ``get-harvest-job``. It will poll every 2 seconds until a successful state has been reached. 
This will exit with a return code of 255 after 60 failed checks", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: "The name of the harvest job to retrieve", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], + }, ], }; export default completionSpec; diff --git a/src/aws/mwaa.ts b/src/aws/mwaa.ts index dccbb56456b1..9d340db41b80 100644 --- a/src/aws/mwaa.ts +++ b/src/aws/mwaa.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "mwaa", description: - "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. CreateCliToken CreateWebLoginToken Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", + "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. 
CreateCliToken CreateWebLoginToken InvokeRestApi Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", subcommands: [ { name: "create-cli-token", @@ -38,7 +38,7 @@ const completionSpec: Fig.Spec = { { name: "create-environment", description: - "Creates an Amazon Managed Workflows for Apache Airflow (MWAA) environment", + "Creates an Amazon Managed Workflows for Apache Airflow (Amazon MWAA) environment", options: [ { name: "--name", @@ -163,7 +163,7 @@ const completionSpec: Fig.Spec = { { name: "--airflow-version", description: - "The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2 2.8.1", + "The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1", args: { name: "string", }, @@ -294,7 +294,7 @@ const completionSpec: Fig.Spec = { { name: "delete-environment", description: - "Deletes an Amazon Managed Workflows for Apache Airflow (MWAA) environment", + "Deletes an Amazon Managed Workflows for Apache Airflow (Amazon MWAA) environment", options: [ { name: "--name", @@ -355,6 +355,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "invoke-rest-api", + description: + "Invokes the Apache Airflow REST API on the webserver with the specified inputs. To learn more, see Using the Apache Airflow REST API", + options: [ + { + name: "--name", + description: + "The name of the Amazon MWAA environment. 
For example, MyMWAAEnvironment", + args: { + name: "string", + }, + }, + { + name: "--path", + description: + "The Apache Airflow REST API endpoint path to be called. For example, /dags/123456/clearTaskInstances. For more information, see Apache Airflow API", + args: { + name: "string", + }, + }, + { + name: "--method", + description: + "The HTTP method used for making Airflow REST API calls. For example, POST", + args: { + name: "string", + }, + }, + { + name: "--query-parameters", + description: + "Query parameters to be included in the Apache Airflow REST API call, provided as a JSON object", + args: { + name: "structure", + }, + }, + { + name: "--body", + description: + "The request body for the Apache Airflow REST API call, provided as a JSON object", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-environments", description: @@ -593,7 +657,7 @@ const completionSpec: Fig.Spec = { { name: "--airflow-version", description: - "The Apache Airflow version for your environment. 
To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1", + "The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1", args: { name: "string", }, diff --git a/src/aws/network-firewall.ts b/src/aws/network-firewall.ts index 0e8ee88af9dc..d754867f4ddc 100644 --- a/src/aws/network-firewall.ts +++ b/src/aws/network-firewall.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "network-firewall", description: - "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. 
For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 6.0.9. For information about Suricata, see the Suricata website. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. 
In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", + "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. 
The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. 
In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", subcommands: [ { name: "associate-firewall-policy", diff --git a/src/aws/opensearch.ts b/src/aws/opensearch.ts index 401aabf4cbd1..6e268a32982c 100644 --- a/src/aws/opensearch.ts +++ b/src/aws/opensearch.ts @@ -184,6 +184,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--service", + description: "The Amazon Web Services service SP to grant access to", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -277,6 +284,76 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-application", + description: "Creates an OpenSearch Application", + options: [ + { + name: "--client-token", + description: + "A unique client idempotency token. It will be auto generated if not provided", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Name of the OpenSearch Appication to create. Application names are unique across the applications owned by an account within an Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--data-sources", + description: + "Data sources to be associated with the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--iam-identity-center-options", + description: + "Settings of IAM Identity Center for the OpenSearch Application", + args: { + name: "structure", + }, + }, + { + name: "--app-configs", + description: + "Configurations of the OpenSearch Application, inlcuding admin configuration", + args: { + name: "list", + }, + }, + { + name: "--tag-list", + description: "A list of tags attached to a domain", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-domain", description: @@ -397,6 +474,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--identity-center-options", + description: + "Options for IAM Identity Center Option control for the domain", + args: { + name: "structure", + }, + }, { name: "--tag-list", description: "List of tags to add to the domain upon creation", @@ -612,6 +697,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-application", + description: "Deletes an existing OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier for the OpenSearch Application that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-source", description: @@ -1458,6 +1574,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-application", + description: + "Check the configuration and status of an existing OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier of the checked OpenSearch Application", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-compatible-versions", description: @@ -1690,6 +1838,77 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-applications", + description: "List all OpenSearch Applications under your account", + options: [ + { + name: "--next-token", + description: + "When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page", + args: { + name: "string", + }, + }, + { + name: "--statuses", + description: + "OpenSearch Application Status can be used as filters for the listing request. Possible values are CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED", + args: { + name: "list", + }, + }, + { + name: "--max-results", + description: + "An optional parameter that specifies the maximum number of results to return for a given request", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. 
This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-sources", description: @@ -2348,6 +2567,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--service", + description: "The service SP to revoke access from", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2460,6 +2686,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-application", + description: "Update the OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier of the OpenSearch Application to be updated", + args: { + name: "string", + }, + }, + { + name: "--data-sources", + description: + "Data sources to be associated with the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--app-configs", + description: + "Configurations to be changed for the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-data-source", description: @@ -2632,6 +2905,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--identity-center-options", + description: "Container for IAM Identity Center Options settings", + args: { + name: "structure", + }, + }, { name: "--auto-tune-options", description: "Options for Auto-Tune", diff --git a/src/aws/opensearchserverless.ts b/src/aws/opensearchserverless.ts index 4e9c364b93d0..e2aa1ca884b6 100644 --- a/src/aws/opensearchserverless.ts +++ b/src/aws/opensearchserverless.ts @@ -350,6 +350,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--iam-identity-center-options", + description: + "Describes IAM Identity Center options in the form of a key-value map. This field is required if you specify iamidentitycenter for the type parameter", + args: { + name: "structure", + }, + }, { name: "--name", description: "The name of the security configuration", @@ -1590,6 +1598,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--iam-identity-center-options-updates", + description: + "Describes IAM Identity Center options in the form of a key-value map", + args: { + name: "structure", + }, + }, { name: "--id", description: diff --git a/src/aws/payment-cryptography-data.ts b/src/aws/payment-cryptography-data.ts index 23c3d92db1a6..611799a142fb 100644 --- a/src/aws/payment-cryptography-data.ts +++ b/src/aws/payment-cryptography-data.ts @@ -307,7 +307,7 @@ const completionSpec: Fig.Spec = { { name: "generate-pin-data", description: - "Generates pin-related data such as PIN, PIN Verification Value (PVV), PIN Block, and PIN Offset during new card issuance or reissuance. 
For more information, see Generate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation generates PIN, PVV, or PIN Offset and then encrypts it using Pin Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography. This operation uses a separate Pin Verification Key (PVK) for VISA PVV generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GenerateCardValidationData TranslatePinData VerifyPinData", + "Generates pin-related data such as PIN, PIN Verification Value (PVV), PIN Block, and PIN Offset during new card issuance or reissuance. For more information, see Generate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation generates PIN, PVV, or PIN Offset and then encrypts it using Pin Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography. This operation uses a separate Pin Verification Key (PVK) for VISA PVV generation. Using ECDH key exchange, you can receive cardholder selectable PINs into Amazon Web Services Payment Cryptography. The ECDH derived key protects the incoming PIN block. You can also use it for reveal PIN, wherein the generated PIN block is protected by the ECDH derived key before transmission from Amazon Web Services Payment Cryptography. For more information on establishing ECDH derived keys, see the Generating keys in the Amazon Web Services Payment Cryptography User Guide. 
For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GenerateCardValidationData TranslatePinData VerifyPinData", options: [ { name: "--generation-key-identifier", @@ -320,7 +320,7 @@ const completionSpec: Fig.Spec = { { name: "--encryption-key-identifier", description: - "The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block", + "The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -356,6 +356,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-wrapped-key", + description: + "Parameter information of a WrappedKeyBlock for encryption key exchange", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -457,12 +465,12 @@ const completionSpec: Fig.Spec = { { name: "translate-pin-data", description: - "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing the encrytion of PIN block from one encryption key to another encryption key and changing PIN block format from one to another without PIN block data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation type for DUKPT translations. This operation also supports dynamic keys, allowing you to pass a dynamic PEK as a TR-31 WrappedKeyBlock. 
This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To translate PIN block using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped PEK. The incoming wrapped key shall have a key purpose of P0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GeneratePinData VerifyPinData", + "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing a PIN block from one encryption key to another and optionally change its format. PIN block translation occurs entirely within the HSM boundary and PIN data never enters or leaves Amazon Web Services Payment Cryptography in clear text. 
The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography also supports use of dynamic keys and ECDH (Elliptic Curve Diffie-Hellman) based key exchange for this operation. Dynamic keys allow you to pass a PEK as a TR-31 WrappedKeyBlock. They can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To translate PIN block using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped PEK. The incoming wrapped key shall have a key purpose of P0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. Using ECDH key exchange, you can receive cardholder selectable PINs into Amazon Web Services Payment Cryptography. The ECDH derived key protects the incoming PIN block, which is translated to a PEK encrypted PIN block for use within the service. You can also use ECDH for reveal PIN, wherein the service translates the PIN block from PEK to a ECDH derived encryption key. For more information on establishing ECDH derived keys, see the Generating keys in the Amazon Web Services Payment Cryptography User Guide. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. 
Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GeneratePinData VerifyPinData", options: [ { name: "--incoming-key-identifier", description: - "The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key for PIN block. Otherwise, it is the key identifier used to perform the operation", + "The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK. For dynamic keys, it is the keyARN of KEK of the TR-31 wrapped PEK. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -470,7 +478,7 @@ const completionSpec: Fig.Spec = { { name: "--outgoing-key-identifier", description: - "The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK", + "The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -806,6 +814,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--encryption-wrapped-key", + description: + "Parameter information of a WrappedKeyBlock for encryption key exchange", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/rds.ts b/src/aws/rds.ts index 22b9a1f6b7e0..fa9e117c40c8 100644 --- a/src/aws/rds.ts +++ b/src/aws/rds.ts @@ -194,7 +194,7 @@ const completionSpec: Fig.Spec = { { name: "--apply-action", description: - "The pending maintenance action to apply to this resource. 
Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation", + "The pending maintenance action to apply to this resource. Valid Values: ca-certificate-rotation db-upgrade hardware-maintenance os-upgrade system-update For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS", args: { name: "string", }, @@ -1322,12 +1322,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting", }, { name: "--no-enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting", }, { name: "--serverless-v2-scaling-configuration", @@ -8934,12 +8934,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. 
Instead use the ClusterScalabilityType setting when you create your Aurora Limitless Database DB cluster", }, { name: "--no-enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting when you create your Aurora Limitless Database DB cluster", }, { name: "--ca-certificate-identifier", @@ -11559,6 +11559,48 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--monitoring-interval", + description: + "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0", + args: { + name: "integer", + }, + }, + { + name: "--monitoring-role-arn", + description: + "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value", + args: { + name: "string", + }, + }, + { + name: "--enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--no-enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--performance-insights-kms-key-id", + description: + "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. 
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--performance-insights-retention-period", + description: + "The number of days to retain Performance Insights data. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error", + args: { + name: "integer", + }, + }, { name: "--engine-lifecycle-support", description: @@ -11832,6 +11874,48 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--monitoring-interval", + description: + "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0", + args: { + name: "integer", + }, + }, + { + name: "--monitoring-role-arn", + description: + "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. 
If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value", + args: { + name: "string", + }, + }, + { + name: "--enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--no-enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--performance-insights-kms-key-id", + description: + "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--performance-insights-retention-period", + description: + "The number of days to retain Performance Insights data. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error", + args: { + name: "integer", + }, + }, { name: "--engine-lifecycle-support", description: diff --git a/src/aws/redshift-data.ts b/src/aws/redshift-data.ts index 67bed85724bb..25ed6d1dc3db 100644 --- a/src/aws/redshift-data.ts +++ b/src/aws/redshift-data.ts @@ -40,6 +40,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--result-format", + description: + "The data format of the result of the SQL statement. 
If no format is specified, the default is JSON", + args: { + name: "string", + }, + }, { name: "--secret-arn", description: @@ -352,6 +360,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--result-format", + description: + "The data format of the result of the SQL statement. If no format is specified, the default is JSON", + args: { + name: "string", + }, + }, { name: "--secret-arn", description: @@ -430,7 +446,63 @@ const completionSpec: Fig.Spec = { { name: "get-statement-result", description: - "Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", + "Fetches the temporarily cached result of an SQL statement in JSON format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as JSON , or let the format default to JSON. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", + options: [ + { + name: "--id", + description: + "The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates then number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query. This identifier is returned by BatchExecuteStatment, ExecuteStatment, and ListStatements", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "A value that indicates the starting point for the next set of response records in a subsequent request. 
If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-statement-result-v2", + description: + "Fetches the temporarily cached result of an SQL statement in CSV format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as CSV. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", options: [ { name: "--id", diff --git a/src/aws/redshift-serverless.ts b/src/aws/redshift-serverless.ts index ee4c0599768e..182667d5419c 100644 --- a/src/aws/redshift-serverless.ts +++ b/src/aws/redshift-serverless.ts @@ -612,6 +612,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--price-performance-target", + description: + "An object that represents the price performance target settings for the workgroup", + args: { + name: "structure", + }, + }, { name: "--publicly-accessible", description: @@ -3060,6 +3068,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--price-performance-target", + description: + "An object that represents the price performance target settings for the workgroup", + args: { + name: "structure", + }, + }, { name: "--publicly-accessible", description: diff --git a/src/aws/redshift.ts b/src/aws/redshift.ts index 5c498e4de771..5a7c56dea8ce 100644 --- a/src/aws/redshift.ts +++ b/src/aws/redshift.ts @@ -1452,7 +1452,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-integration", - description: "Creates a zero-ETL integration with Amazon Redshift", + description: + "Creates a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--source-arn", @@ -2354,7 +2355,8 @@ const 
completionSpec: Fig.Spec = { }, { name: "delete-integration", - description: "Deletes a zero-ETL integration with Amazon Redshift", + description: + "Deletes a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--integration-arn", @@ -4570,7 +4572,7 @@ const completionSpec: Fig.Spec = { { name: "describe-integrations", description: - "Describes one or more zero-ETL integrations with Amazon Redshift", + "Describes one or more zero-ETL or S3 event integrations with Amazon Redshift", options: [ { name: "--integration-arn", @@ -5659,7 +5661,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-type", description: - "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", + "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration or S3 event integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. 
For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", args: { name: "string", }, @@ -7401,7 +7403,8 @@ const completionSpec: Fig.Spec = { }, { name: "modify-integration", - description: "Modifies a zero-ETL integration with Amazon Redshift", + description: + "Modifies a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--integration-arn", diff --git a/src/aws/route53.ts b/src/aws/route53.ts index e201b99068dd..65f43fcbebf9 100644 --- a/src/aws/route53.ts +++ b/src/aws/route53.ts @@ -440,7 +440,7 @@ const completionSpec: Fig.Spec = { { name: "create-query-logging-config", description: - "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group. DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following: Route 53 edge location that responded to the DNS query Domain or subdomain that was requested DNS record type, such as A or AAAA DNS response code, such as NoError or ServFail Log Group and Resource Policy Before you create a query logging configuration, perform the following operations. If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following: You must create the log group in the us-east-1 region. You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for. 
When you create log groups for query logging, we recommend that you use a consistent prefix, for example: /aws/route53/hosted zone name In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example: arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/* To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values: For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID. For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111. For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide. You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI. 
Log Streams and Edge Locations When Route 53 finishes creating the configuration for DNS query logging, it does the following: Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location. Begins to send query logs to the applicable log stream. The name of each log stream is in the following format: hosted zone ID/edge location code The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page. Queries That Are Logged Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide. Log File Format For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide. Pricing For information about charges for query logs, see Amazon CloudWatch Pricing. 
How to Stop Logging If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig", + "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group. DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following: Route 53 edge location that responded to the DNS query Domain or subdomain that was requested DNS record type, such as A or AAAA DNS response code, such as NoError or ServFail Log Group and Resource Policy Before you create a query logging configuration, perform the following operations. If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following: You must create the log group in the us-east-1 region. You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for. When you create log groups for query logging, we recommend that you use a consistent prefix, for example: /aws/route53/hosted zone name In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. 
You must create the CloudWatch Logs resource policy in the us-east-1 region. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example: arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/* To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values: For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID. For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111. For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide. You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI. Log Streams and Edge Locations When Route 53 finishes creating the configuration for DNS query logging, it does the following: Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location. Begins to send query logs to the applicable log stream. The name of each log stream is in the following format: hosted zone ID/edge location code The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. 
The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page. Queries That Are Logged Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide. Log File Format For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide. Pricing For information about charges for query logs, see Amazon CloudWatch Pricing. How to Stop Logging If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig", options: [ { name: "--hosted-zone-id", @@ -1456,7 +1456,7 @@ const completionSpec: Fig.Spec = { { name: "get-hosted-zone", description: - "Gets information about a specified hosted zone including the four name servers assigned to the hosted zone", + "Gets information about a specified hosted zone including the four name servers assigned to the hosted zone. returns the VPCs associated with the specified hosted zone and does not reflect the VPC associations by Route 53 Profiles. 
To get the associations to a Profile, call the ListProfileAssociations API", options: [ { name: "--id", @@ -2195,7 +2195,7 @@ const completionSpec: Fig.Spec = { { name: "list-hosted-zones-by-vpc", description: - "Lists all the private hosted zones that a specified VPC is associated with, regardless of which Amazon Web Services account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values: An OwningAccount element, which contains the account number of either the current Amazon Web Services account or another Amazon Web Services account. Some services, such as Cloud Map, create hosted zones using the current account. An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com. When listing private hosted zones, the hosted zone and the Amazon VPC must belong to the same partition where the hosted zones were created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition. The following are the supported partitions: aws - Amazon Web Services Regions aws-cn - China Regions aws-us-gov - Amazon Web Services GovCloud (US) Region For more information, see Access Management in the Amazon Web Services General Reference", + "Lists all the private hosted zones that a specified VPC is associated with, regardless of which Amazon Web Services account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values: An OwningAccount element, which contains the account number of either the current Amazon Web Services account or another Amazon Web Services account. Some services, such as Cloud Map, create hosted zones using the current account. 
An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com. ListHostedZonesByVPC returns the hosted zones associated with the specified VPC and does not reflect the hosted zone associations to VPCs via Route 53 Profiles. To get the associations to a Profile, call the ListProfileResourceAssociations API. When listing private hosted zones, the hosted zone and the Amazon VPC must belong to the same partition where the hosted zones were created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition. The following are the supported partitions: aws - Amazon Web Services Regions aws-cn - China Regions aws-us-gov - Amazon Web Services GovCloud (US) Region For more information, see Access Management in the Amazon Web Services General Reference", options: [ { name: "--vpc-id", diff --git a/src/aws/sagemaker.ts b/src/aws/sagemaker.ts index 0e14b3cd7f6d..cbbc99086367 100644 --- a/src/aws/sagemaker.ts +++ b/src/aws/sagemaker.ts @@ -127,6 +127,46 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "batch-delete-cluster-nodes", + description: + "Deletes specific nodes within a SageMaker HyperPod cluster. BatchDeleteClusterNodes accepts a cluster name and a list of node IDs. To safeguard your work, back up your data to Amazon S3 or an FSx for Lustre file system before invoking the API on a worker node group. This will help prevent any potential data loss from the instance root volume. For more information about backup, see Use the backup script provided by SageMaker HyperPod. If you want to invoke this API on an existing cluster, you'll first need to patch the cluster by running the UpdateClusterSoftware API. 
For more information about patching a cluster, see Update the SageMaker HyperPod platform software of a cluster", + options: [ + { + name: "--cluster-name", + description: + "The name of the SageMaker HyperPod cluster from which to delete the specified nodes", + args: { + name: "string", + }, + }, + { + name: "--node-ids", + description: + "A list of node IDs to be deleted from the specified cluster. For SageMaker HyperPod clusters using the Slurm workload manager, you cannot remove instances that are configured as Slurm controller nodes", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "batch-describe-model-package", description: @@ -1289,7 +1329,8 @@ const completionSpec: Fig.Spec = { }, { name: "--default-space-settings", - description: "The default settings used to create a space", + description: + "The default settings for shared spaces that users create in the domain", args: { name: "structure", }, @@ -3367,6 +3408,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--model-life-cycle", + description: + "A structure describing the current state of the model in its life cycle", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -3668,7 +3717,7 @@ const completionSpec: Fig.Spec = { { name: "--accelerator-types", description: - "A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of EI instance types to associate with this notebook instance", args: { name: "list", }, @@ -3975,7 +4024,7 @@ const completionSpec: Fig.Spec = { { name: "create-presigned-domain-url", description: - "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. 
Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page", + "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. 
You can configure this value using SessionExpirationDurationInSeconds", options: [ { name: "--domain-id", @@ -19242,7 +19291,7 @@ const completionSpec: Fig.Spec = { { name: "update-cluster-software", description: - "Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster", + "Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster. The UpgradeClusterSoftware API call may impact your SageMaker HyperPod cluster uptime and availability. Plan accordingly to mitigate potential disruptions to your workloads", options: [ { name: "--cluster-name", @@ -19499,7 +19548,7 @@ const completionSpec: Fig.Spec = { { name: "--default-space-settings", description: - "The default settings used to create a space within the domain", + "The default settings for shared spaces that users create in the domain", args: { name: "structure", }, @@ -20387,6 +20436,22 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--model-life-cycle", + description: + "A structure describing the current state of the model in its life cycle", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique token that guarantees that the call to this API is idempotent", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -20570,7 +20635,7 @@ const completionSpec: Fig.Spec = { { name: "--accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. 
This parameter was used to specify a list of the EI instance types to associate with this notebook instance", args: { name: "list", }, @@ -20578,12 +20643,12 @@ const completionSpec: Fig.Spec = { { name: "--disassociate-accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to remove from this notebook instance. This operation is idempotent. If you specify an accelerator type that is not associated with the notebook instance when you call this method, it does not throw an error", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance", }, { name: "--no-disassociate-accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to remove from this notebook instance. This operation is idempotent. If you specify an accelerator type that is not associated with the notebook instance when you call this method, it does not throw an error", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. 
This parameter was used to specify a list of the EI instance types to remove from this notebook instance", }, { name: "--disassociate-default-code-repository", diff --git a/src/aws/socialmessaging.ts b/src/aws/socialmessaging.ts index c63676b1dc84..063318d54969 100644 --- a/src/aws/socialmessaging.ts +++ b/src/aws/socialmessaging.ts @@ -544,5 +544,4 @@ const completionSpec: Fig.Spec = { }, ], }; - export default completionSpec; diff --git a/src/aws/storagegateway.ts b/src/aws/storagegateway.ts index e44ec66aacca..f50e6b9d66ff 100644 --- a/src/aws/storagegateway.ts +++ b/src/aws/storagegateway.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "storagegateway", description: - "Storage Gateway Service Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. 
When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", + "Storage Gateway Service Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. 
Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. 
For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", subcommands: [ { name: "activate-gateway", @@ -42,7 +42,7 @@ const completionSpec: Fig.Spec = { { name: "--gateway-type", description: - "A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB", + "A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB", args: { name: "string", }, @@ -709,7 +709,7 @@ const completionSpec: Fig.Spec = { { name: "--location-arn", description: - "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::my-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", + "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. 
The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::amzn-s3-demo-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", args: { name: "string", }, @@ -787,7 +787,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _", args: { name: "string", }, @@ -908,7 +908,7 @@ const completionSpec: Fig.Spec = { { name: "--location-arn", description: - "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::my-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. 
Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", + "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::amzn-s3-demo-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", args: { name: "string", }, @@ -1038,7 +1038,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + 'The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,",\\,/,|,?,*,+, or ASCII control characters 1-31', args: { name: "string", }, @@ -4539,7 +4539,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. 
A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _", args: { name: "string", }, @@ -4734,7 +4734,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + 'The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,",\\,/,|,?,*,+, or ASCII control characters 1-31', args: { name: "string", }, diff --git a/src/aws/supplychain.ts b/src/aws/supplychain.ts index 49468cde3950..3216c867c6b8 100644 --- a/src/aws/supplychain.ts +++ b/src/aws/supplychain.ts @@ -53,7 +53,7 @@ const completionSpec: Fig.Spec = { { name: "create-data-integration-flow", description: - "Create DataIntegrationFlow to map one or more different sources to one target using the SQL transformation query", + "Enables you to programmatically create a data pipeline to ingest data from source systems such as Amazon S3 buckets, to a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset along with the data transformation query provided with the API", options: [ { name: "--instance-id", @@ -120,7 +120,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-data-lake-dataset", - description: "Create a data lake dataset", + description: + "Enables you to programmatically create an Amazon Web Services Supply Chain data lake dataset. Developers can create the datasets using their pre-defined or custom schema for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -190,7 +191,7 @@ const completionSpec: Fig.Spec = { { name: "create-instance", description: - "Create a new instance for AWS Supply Chain. 
This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance", + "Enables you to programmatically create an Amazon Web Services Supply Chain instance by applying KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a CreateInstance request, Amazon Web Services Supply Chain immediately returns the instance resource, instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance. If the instance results in an unhealthy state, you need to check the error message, delete the current instance, and recreate a new one based on the mitigation from the error message", options: [ { name: "--instance-name", @@ -250,7 +251,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-data-integration-flow", - description: "Delete the DataIntegrationFlow", + description: + "Enable you to programmatically delete an existing data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name", options: [ { name: "--instance-id", @@ -288,7 +290,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-data-lake-dataset", - description: "Delete a data lake dataset", + description: + "Enables you to programmatically delete an Amazon Web Services Supply Chain data lake dataset. Developers can delete the existing datasets for a given instance ID, namespace, and instance name", options: [ { name: "--instance-id", @@ -335,7 +338,7 @@ const completionSpec: Fig.Spec = { { name: "delete-instance", description: - "Delete the instance. 
This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status", + "Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status", options: [ { name: "--instance-id", @@ -402,7 +405,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-data-integration-flow", - description: "View the DataIntegrationFlow details", + description: + "Enables you to programmatically view a specific data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name", options: [ { name: "--instance-id", @@ -440,7 +444,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-data-lake-dataset", - description: "Get a data lake dataset", + description: + "Enables you to programmatically view an Amazon Web Services Supply Chain data lake dataset. 
Developers can view the data lake dataset information such as namespace, schema, and so on for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -487,7 +492,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-instance", - description: "Get the AWS Supply Chain instance details", + description: + "Enables you to programmatically retrieve the information related to an Amazon Web Services Supply Chain instance ID", options: [ { name: "--instance-id", @@ -517,7 +523,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-data-integration-flows", - description: "Lists all the DataIntegrationFlows in a paginated way", + description: + "Enables you to programmatically list all data pipelines for the provided Amazon Web Services Supply Chain instance", options: [ { name: "--instance-id", @@ -589,7 +596,7 @@ const completionSpec: Fig.Spec = { { name: "list-data-lake-datasets", description: - "List the data lake datasets for a specific instance and name space", + "Enables you to programmatically view the list of Amazon Web Services Supply Chain data lake datasets. Developers can view the datasets and the corresponding information such as namespace, schema, and so on for a given instance ID and namespace", options: [ { name: "--instance-id", @@ -667,7 +674,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-instances", - description: "List all the AWS Supply Chain instances in a paginated way", + description: + "List all Amazon Web Services Supply Chain instances for a specific account. 
Enables you to programmatically list all Amazon Web Services Supply Chain instances based on their account ID, instance name, and state of the instance (active or delete)", options: [ { name: "--next-token", @@ -745,7 +753,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "List all the tags for an Amazon Web ServicesSupply Chain resource", + "List all the tags for an Amazon Web ServicesSupply Chain resource. You can list all the tags added to a resource. By listing the tags, developers can view the tag level information on a resource and perform actions such as, deleting a resource associated with a particular tag", options: [ { name: "--resource-arn", @@ -845,7 +853,7 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "Create tags for an Amazon Web Services Supply chain resource", + "You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer", options: [ { name: "--resource-arn", @@ -885,7 +893,7 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Delete tags for an Amazon Web Services Supply chain resource", + "You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. 
During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets", options: [ { name: "--resource-arn", @@ -924,7 +932,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-data-integration-flow", - description: "Update the DataIntegrationFlow", + description: + "Enables you to programmatically update an existing data pipeline to ingest data from the source systems such as, Amazon S3 buckets, to a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset along with the data transformation query provided with the API", options: [ { name: "--instance-id", @@ -986,7 +995,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-data-lake-dataset", - description: "Update a data lake dataset", + description: + "Enables you to programmatically update an Amazon Web Services Supply Chain data lake dataset. Developers can update the description of a data lake dataset for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -1039,7 +1049,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-instance", - description: "Update the instance", + description: + "Enables you to programmatically update an Amazon Web Services Supply Chain instance description by providing all the relevant information such as account ID, instance ID and so on without using the AWS console", options: [ { name: "--instance-id", diff --git a/src/aws/taxsettings.ts b/src/aws/taxsettings.ts index 64c9427bca22..d75a18cca352 100644 --- a/src/aws/taxsettings.ts +++ b/src/aws/taxsettings.ts @@ -37,7 +37,7 @@ const completionSpec: Fig.Spec = { { name: "batch-put-tax-registration", description: - "Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. 
You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. 
You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. 
Ukraine The sector valid values are Business and Individual", + "Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia The sector valid values are Business and Individual. RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business. For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number. For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number. For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number. 
For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you\u2019re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. 
Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", options: [ { name: "--account-ids", @@ -73,6 +73,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-supplemental-tax-registration", + description: + "Deletes a supplemental tax registration for a single account", + options: [ + { + name: "--authority-id", + description: + "The unique authority Id for the supplemental TRN information that needs to be deleted", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-tax-registration", description: @@ -174,6 +206,69 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-supplemental-tax-registrations", + description: + "Retrieves supplemental tax registrations for a single account", + options: [ + { + name: "--max-results", + description: + "The number of taxRegistrations results you want in one response", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next set of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tax-registrations", description: @@ -237,10 +332,41 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "put-supplemental-tax-registration", + description: "Stores supplemental tax registration for a single account", + options: [ + { + name: "--tax-registration-entry", + description: + "The supplemental TRN information that will be stored for the caller account ID", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-tax-registration", description: - "Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. 
By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. 
If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", + "Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia The sector valid values are Business and Individual. 
RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business. For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number. For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number. For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number. For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you\u2019re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. 
Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", options: [ { name: "--account-id", diff --git a/src/aws/workmail.ts b/src/aws/workmail.ts index d2076ff960cb..7422d7151b12 100644 --- a/src/aws/workmail.ts +++ b/src/aws/workmail.ts @@ -339,6 +339,51 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-identity-center-application", + description: + "Creates the WorkMail application in IAM Identity Center that can be used later in the WorkMail - IdC integration. For more information, see PutIdentityProviderConfiguration. 
This action does not affect the authentication settings for any WorkMail organizations", + options: [ + { + name: "--name", + description: "The name of the IAM Identity Center application", + args: { + name: "string", + }, + }, + { + name: "--instance-arn", + description: "The Amazon Resource Name (ARN) of the instance", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: "The idempotency token associated with the request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-impersonation-role", description: @@ -728,6 +773,14 @@ const completionSpec: Fig.Spec = { description: "If this parameter is enabled, the user will be hidden from the address book", }, + { + name: "--identity-provider-user-id", + description: + "User ID from the IAM Identity Center. 
If this parameter is empty it will be updated automatically when the user logs in for the first time to the mailbox associated with WorkMail", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -943,6 +996,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-identity-center-application", + description: + "Deletes the IAM Identity Center application from WorkMail. This action does not affect the authentication settings for any WorkMail organizations", + options: [ + { + name: "--application-arn", + description: "The Amazon Resource Name (ARN) of the application", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-identity-provider-configuration", + description: + "Disables the integration between IdC and WorkMail. Authentication will continue with the directory as it was before the IdC integration. 
You might have to reset your directory passwords and reconfigure your desktop and mobile email clients", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-impersonation-role", description: @@ -1155,6 +1270,54 @@ const completionSpec: Fig.Spec = { description: "Deletes a WorkMail organization even if the organization has enabled users", }, + { + name: "--delete-identity-center-application", + description: + "Deletes IAM Identity Center application for WorkMail. This action does not affect authentication settings for any organization", + }, + { + name: "--no-delete-identity-center-application", + description: + "Deletes IAM Identity Center application for WorkMail. This action does not affect authentication settings for any organization", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-personal-access-token", + description: + "Deletes the Personal Access Token from the provided WorkMail Organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--personal-access-token-id", + description: "The Personal Access Token ID", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1478,6 +1641,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-identity-provider-configuration", + description: + "Returns detailed information on the current IdC setup for the WorkMail organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-inbound-dmarc-settings", description: @@ -2113,6 +2307,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-personal-access-token-metadata", + description: + "Requests details of a specific Personal Access Token within the WorkMail organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--personal-access-token-id", + description: "The Personal Access Token ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-access-control-rules", description: @@ -2894,6 +3126,83 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-personal-access-tokens", + description: "Returns a summary of your Personal Access Tokens", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--user-id", + description: "The WorkMail User ID", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "The token from the previous response to query the next page", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum amount of items that should be returned in a response", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-resource-delegates", description: @@ -3320,6 +3629,58 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "put-identity-provider-configuration", + description: + "Enables integration between IAM Identity Center (IdC) and WorkMail to proxy authentication requests for mailbox users. You can connect your IdC directory or your external directory to WorkMail through IdC and manage access to WorkMail mailboxes in a single place. 
For enhanced protection, you could enable Multifactor Authentication (MFA) and Personal Access Tokens", + options: [ + { + name: "--organization-id", + description: "The ID of the WorkMail Organization", + args: { + name: "string", + }, + }, + { + name: "--authentication-mode", + description: "The authentication mode used in WorkMail", + args: { + name: "string", + }, + }, + { + name: "--identity-center-configuration", + description: "The details of the IAM Identity Center configuration", + args: { + name: "structure", + }, + }, + { + name: "--personal-access-token-configuration", + description: "The details of the Personal Access Token configuration", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-inbound-dmarc-settings", description: @@ -3987,7 +4348,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-group", - description: "Updates attibutes in a group", + description: "Updates attributes in a group", options: [ { name: "--organization-id", @@ -4500,7 +4861,7 @@ const completionSpec: Fig.Spec = { }, { name: "--zip-code", - description: "Updates the user's zipcode", + description: "Updates the user's zip code", args: { name: "string", }, @@ -4526,6 +4887,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--identity-provider-user-id", + description: + "User ID from the IAM Identity Center. If this parameter is empty it will be updated automatically when the user logs in for the first time to the mailbox associated with WorkMail", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: